from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)

def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
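
# Quick illustrative checks (not part of the original file): after every swap
# the index steps back one position, so an out-of-order element bubbles
# leftward until it finds its place.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []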

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Division truncates toward zero (C-style), not toward -inf.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
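
# Illustrative trace (not part of the original file): for ["2", "1", "+", "3", "*"]
# the stack evolves [2] -> [2, 1] -> [3] -> [3, 3] -> [9].
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9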

if __name__ == "__main__":
    import doctest

    doctest.testmod()

from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
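
# Minimal usage sketch (illustrative; the helper's name `get_activation` is
# reconstructed from context rather than confirmed by the original file):
#
#     import torch
#     act = get_activation("silu")
#     out = act(torch.randn(2, 4))  # SiLU applied elementwise; out.shape == (2, 4)
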
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
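
# Worked example (not part of the original file): rotating [[1, 2], [3, 4]] by
# 90 degrees counterclockwise moves the last column to the top row:
# transpose -> [[1, 3], [2, 4]], then reverse_row -> [[2, 4], [1, 3]].
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]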

if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))

import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
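
# Illustrative behavior of the `_pad` override above (hypothetical values):
# `0` already means "local attention" in LED's `global_attention_mask`, so
# padded positions are marked with `-1` instead of `0`.
#
#     mask = [1, 0, 0]                  # 3 tokens of global-attention info
#     mask + [-1] * (5 - len(mask))     # padded to length 5, padding_side="right"
#     # -> [1, 0, 0, -1, -1]
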
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
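
# Worked example (not part of the original file): 49/98 is digit-cancelling
# because removing the shared digit 9 leaves 4/8, and 4/8 == 49/98 == 0.5.
assert is_digit_cancelling(49, 98)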

def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())

from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
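
# A small numeric sketch of the blend_h crossfade used by the tiled paths above
# (illustrative values, not part of the original file): each overlapping column
# is a linear ramp from tile `a` to tile `b`, which hides the tile seam.
#
#     a = torch.ones(1, 1, 1, 4)   # right edge of the left tile
#     b = torch.zeros(1, 1, 1, 4)  # left edge of the right tile
#     AutoencoderKL.blend_h(None, a, b, 4)  # `self` is unused by blend_h
#     # -> tensor([[[[1.0000, 0.7500, 0.5000, 0.2500]]]])
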
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
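
# Usage sketch (illustrative, assumes torch is installed): with the
# `_LazyModule` wiring above, the heavy `modeling_mra` submodule is only
# imported when one of its attributes is first accessed, e.g.:
#
#     from transformers import MraConfig, MraModel   # cheap at import time
#     model = MraModel(MraConfig())                  # triggers the lazy import
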
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
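
# Hypothetical illustration (the helper below is not part of the original
# file): a placeholder map like the one above can be applied to doc snippets
# with a simple string-replacement pass.
def fill_placeholders(text: str, mapping: dict) -> str:
    for placeholder, value in mapping.items():
        text = text.replace(placeholder, value)
    return text


# fill_placeholders("{model_class}.from_pretrained(...)", black_avoid_patterns)
# -> "FakeModelClass.from_pretrained(...)"
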
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))

import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : Any , **_snake_case : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
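# Added usage sketch (mirrors the slow test above; the model id and expected
# scores are taken from that test, not re-verified here):
# vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
# vqa(image='./tests/fixtures/tests_samples/COCO/000000039769.png',
#     question='How many cats are there?', top_k=2)
# -> [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]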
| 9 | 0 |
'''simple docstring'''
class Node :
    '''simple docstring'''
    def __init__( self, val ):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self, val ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root ,res ):
"""simple docstring"""
if root:
inorder(root.left ,__UpperCamelCase )
res.append(root.val )
inorder(root.right ,__UpperCamelCase )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 ,len(arr ) ):
root.insert(arr[i] )
# Traverse BST in order.
    res = []
    inorder(root ,res )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
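    # Added illustration (assumes the repaired names above): a reverse-sorted
    # input degenerates the BST into a linked list, so insertion costs O(n) per
    # element while the output is still sorted.
    print(tree_sort([5, 4, 3, 2, 1]))  # expected: [1, 2, 3, 4, 5]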
| 28 |
def _modexpt( base , exponent , modulo_value ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base = 1_777 , height = 1_855 , digits = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(f'{solution() = }')
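    # Added self-check (assumes the repaired `_modexpt` above): the helper should
    # agree with Python's built-in three-argument pow().
    assert _modexpt(1_777, 1_855, 10**8) == pow(1_777, 1_855, 10**8)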
| 9 | 0 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime ( number ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator ( ):
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( n = 2_000_000 ):
    return sum(takewhile(lambda x : x < n ,prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
for i in range(config.num_hidden_layers ):
A__ = 'backbone.' if is_semantic else ''
# queries, keys and values
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A__ = gamma_a
A__ = gamma_a
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(__UpperCamelCase )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
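    # Example invocation (added; the script filename and output folder are
    # hypothetical, inferred from the function and argument names above):
    # python convert_dit_checkpoint.py \
    #     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
    #     --pytorch_dump_folder_path ./dit-base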
| 9 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__a = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__a = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
__a = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n                                    as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n                                    Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''http://www.cs.umd.edu/~snover/tercom/''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] ,reference_urls=[
'''https://github.com/jhclark/tercom''',
] ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,) -> Optional[Any]:
UpperCAmelCase_ : Tuple = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
UpperCAmelCase_ : str = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
UpperCAmelCase_ : Tuple = TER(
normalized=_SCREAMING_SNAKE_CASE ,no_punct=_SCREAMING_SNAKE_CASE ,asian_support=_SCREAMING_SNAKE_CASE ,case_sensitive=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Optional[Any] = sb_ter.corpus_score(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
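        # `corpus_score` returns sacrebleu's TERScore, whose `score`, `num_edits`
        # and `ref_length` attributes are surfaced in the dict below.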
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length} | 30 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path( graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
# in case there's no path between the 2 nodes
return []
def bfs_shortest_path_distance( graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
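    # Added note: bfs_shortest_path expands paths in FIFO order, so the first
    # complete path returned has the fewest edges; bfs_shortest_path_distance
    # reports that same minimum as an edge count (4 hops for G -> D above).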
| 9 | 0 |
def UpperCAmelCase_ ( number : int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('Input value must be an \'int\' type' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
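# Added usage sketch (assumes the repaired function above): the return value is
# the 1-based position of the highest set bit, i.e. int.bit_length(), so
# UpperCAmelCase_(0b1000) == 4 and UpperCAmelCase_(0) == 0.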
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 |
def interpolation_search( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
return None
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , point )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ):
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
return True
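# Added note: interpolation search assumes a sorted, roughly uniformly
# distributed collection; the probe index above then lands near the target,
# giving O(log log n) expected comparisons versus O(log n) for binary search.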
if __name__ == "__main__":
import sys
    debug = 0
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 6_7
    result = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
| 9 | 0 |
from __future__ import annotations
def maximum_non_adjacent_sum( nums : list[int] ) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
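# Added illustration (uses the repaired names above): for [1, 2, 4, 7, 3] the
# best non-adjacent selection is 2 + 7, so
# maximum_non_adjacent_sum([1, 2, 4, 7, 3]) == 9.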
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 32 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
super().__init__(*_snake_case , **_snake_case )
| 9 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
def load_tfa_weights_in_bert( model , tf_checkpoint_path , config ):
    tf_path = os.path.abspath(tf_checkpoint_path )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
snake_case__ = tf.train.list_variables(__lowerCAmelCase )
snake_case__ = []
snake_case__ = []
snake_case__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case__ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case__ = name[1:]
# figure out how many levels deep the name is
snake_case__ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(__lowerCAmelCase )
# read data
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
names.append('''/'''.join(__lowerCAmelCase ) )
arrays.append(__lowerCAmelCase )
logger.info(F"""Read a total of {len(__lowerCAmelCase ):,} layers""" )
# Sanity check
if len(set(__lowerCAmelCase ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__lowerCAmelCase ) )})""" )
snake_case__ = list(set(__lowerCAmelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = full_name.split('''/''' )
snake_case__ = model
snake_case__ = []
for i, m_name in enumerate(__lowerCAmelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
snake_case__ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
snake_case__ = getattr(__lowerCAmelCase , '''encoder''' )
snake_case__ = getattr(__lowerCAmelCase , '''layer''' )
snake_case__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''pooler''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
snake_case__ = getattr(__lowerCAmelCase , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
snake_case__ = getattr(__lowerCAmelCase , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
snake_case__ = getattr(__lowerCAmelCase , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
snake_case__ = getattr(__lowerCAmelCase , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''intermediate''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
snake_case__ = getattr(__lowerCAmelCase , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
snake_case__ = getattr(__lowerCAmelCase , '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
snake_case__ = '''.'''.join(__lowerCAmelCase )
if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , __lowerCAmelCase ) or re.match(
r'''(\S+)\.attention\.output\.dense\.weight''' , __lowerCAmelCase ):
snake_case__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case__ = array.transpose()
if pointer.shape == array.shape:
snake_case__ = torch.from_numpy(__lowerCAmelCase )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
    # Instantiate model
    logger.info(F"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowerCamelCase__ : Tuple = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
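    # Example invocation (added; the script filename and file paths are hypothetical):
    # python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
    #     --bert_config_file ./bert_config.json \
    #     --pytorch_dump_path ./pytorch_model.bin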
| 33 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 2_5, 5_0]
    abcb = [2_5, 5_0, 7_5]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(7_5)
    zero = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
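    # Added numeric sanity check (pure numpy, independent of the skfuzzy API):
    # the standard complement/union/intersection satisfy De Morgan's law pointwise.
    assert np.allclose(1 - np.maximum(young, middle_aged), np.minimum(1 - young, 1 - middle_aged))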
| 9 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = PhobertTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''l à</w>''']
UpperCamelCase = {'''unk_token''': '''<unk>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = '''Tôi là VinAI Research'''
UpperCamelCase = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCamelCase = '''Tôi là VinAI Research'''
UpperCamelCase = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_)
print(lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_) , lowerCamelCase_)
| 34 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _a ( self : int , _snake_case : int , _snake_case : List[str] ):
"""simple docstring"""
A__ = object_detector(examples[0] , threshold=0.0 )
A__ = len(_snake_case )
self.assertGreater(_snake_case , 0 )
self.assertEqual(
_snake_case , [
{
'score': ANY(_snake_case ),
'label': ANY(_snake_case ),
'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )},
}
for i in range(_snake_case )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@require_torch
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
A__ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
A__ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : int ):
"""simple docstring"""
pass
@require_torch
@slow
def _a ( self : str ):
"""simple docstring"""
A__ = 0.2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = 2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
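        # Added note: `top_k=2` truncates the thresholded detections to the two
        # highest-scoring (label, box) pairs, which is why only two entries are
        # asserted here.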
| 9 | 0 |
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ) -> tuple[float, list[float]]:
    '''simple docstring'''
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
return max_value, fractions
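# Added illustration (assumes the repaired `fractional_knapsack` above): with
# value=[60, 100, 120], weight=[10, 20, 30] and capacity=50, the greedy ratio
# order takes items 0 and 1 whole plus 2/3 of item 2, returning
# (240.0, [1, 1, 0.666...]).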
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE__ = NewType('''DataClass''', Any)
SCREAMING_SNAKE_CASE__ = NewType('''DataClassType''', Any)
def string_to_bool( v ):
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function( choices ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
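# Added illustration (uses the repaired helpers above):
# string_to_bool('yes') -> True, string_to_bool('0') -> False
# make_choice_type_function(['a', 1])('1') -> 1  (the original value, not the string)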
def A ( *,
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = dataclasses.MISSING , __UpperCamelCase = dataclasses.MISSING , __UpperCamelCase = None , **__UpperCamelCase , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A__ = {}
if aliases is not None:
A__ = aliases
if help is not None:
A__ = help
return dataclasses.field(metadata=__UpperCamelCase , default=__UpperCamelCase , default_factory=__UpperCamelCase , **__UpperCamelCase )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Iterable[DataClassType]
def __init__( self : Optional[int] , _snake_case : Union[DataClassType, Iterable[DataClassType]] , **_snake_case : Tuple ):
"""simple docstring"""
if "formatter_class" not in kwargs:
A__ = ArgumentDefaultsHelpFormatter
super().__init__(**_snake_case )
if dataclasses.is_dataclass(_snake_case ):
A__ = [dataclass_types]
A__ = list(_snake_case )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_snake_case )
@staticmethod
def _a ( _snake_case : ArgumentParser , _snake_case : dataclasses.Field ):
"""simple docstring"""
A__ = F'''--{field.name}'''
A__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _snake_case ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
A__ = kwargs.pop('aliases' , [] )
if isinstance(_snake_case , _snake_case ):
A__ = [aliases]
A__ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_snake_case , 'UnionType' ) and isinstance(_snake_case , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_snake_case ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F''' Problem encountered in field \'{field.name}\'.''' )
if type(_snake_case ) not in field.type.__args__:
# filter `str` in Union
A__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A__ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A__ = (
field.type.__args__[0] if isinstance(_snake_case , field.type.__args__[1] ) else field.type.__args__[1]
)
A__ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A__ = {}
if origin_type is Literal or (isinstance(field.type , _snake_case ) and issubclass(field.type , _snake_case )):
if origin_type is Literal:
A__ = field.type.__args__
else:
A__ = [x.value for x in field.type]
A__ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
A__ = field.default
else:
A__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A__ = copy(_snake_case )
# Hack because type=bool in argparse does not behave as we want.
A__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A__ = default
# This tells argparse we accept 0 or 1 value after --field_name
A__ = '?'
# This is the value that will get picked if we do --field_name (without value)
A__ = True
elif isclass(_snake_case ) and issubclass(_snake_case , _snake_case ):
A__ = field.type.__args__[0]
A__ = '+'
if field.default_factory is not dataclasses.MISSING:
A__ = field.default_factory()
elif field.default is dataclasses.MISSING:
A__ = True
else:
A__ = field.type
if field.default is not dataclasses.MISSING:
A__ = field.default
elif field.default_factory is not dataclasses.MISSING:
A__ = field.default_factory()
else:
A__ = True
parser.add_argument(_snake_case , *_snake_case , **_snake_case )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A__ = False
parser.add_argument(F'''--no_{field.name}''' , action='store_false' , dest=field.name , **_snake_case )
def _a ( self : Any , _snake_case : DataClassType ):
"""simple docstring"""
if hasattr(_snake_case , '_argument_group_name' ):
A__ = self.add_argument_group(dtype._argument_group_name )
else:
A__ = self
try:
A__ = get_type_hints(_snake_case )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_snake_case ):
A__ = '.'.join(map(_snake_case , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_snake_case ):
if not field.init:
continue
A__ = type_hints[field.name]
self._parse_dataclass_field(_snake_case , _snake_case )
def _a ( self : Optional[int] , _snake_case : Optional[Any]=None , _snake_case : Any=False , _snake_case : int=True , _snake_case : List[Any]=None , _snake_case : int=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A__ = []
if args_filename:
args_files.append(Path(_snake_case ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A__ = ArgumentParser()
args_file_parser.add_argument(_snake_case , type=_snake_case , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
A__ , A__ = args_file_parser.parse_known_args(args=_snake_case )
A__ = vars(_snake_case ).get(args_file_flag.lstrip('-' ) , _snake_case )
if cmd_args_file_paths:
args_files.extend([Path(_snake_case ) for p in cmd_args_file_paths] )
A__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A__ = file_args + args if args is not None else file_args + sys.argv[1:]
A__ , A__ = self.parse_known_args(args=_snake_case )
A__ = []
for dtype in self.dataclass_types:
A__ = {f.name for f in dataclasses.fields(_snake_case ) if f.init}
A__ = {k: v for k, v in vars(_snake_case ).items() if k in keys}
for k in keys:
delattr(_snake_case , _snake_case )
A__ = dtype(**_snake_case )
outputs.append(_snake_case )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_snake_case )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def _a ( self : Dict , _snake_case : Dict[str, Any] , _snake_case : bool = False ):
"""simple docstring"""
A__ = set(args.keys() )
A__ = []
for dtype in self.dataclass_types:
A__ = {f.name for f in dataclasses.fields(_snake_case ) if f.init}
A__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A__ = dtype(**_snake_case )
outputs.append(_snake_case )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(_snake_case )}''' )
return tuple(_snake_case )
def _a ( self : Dict , _snake_case : str , _snake_case : bool = False ):
"""simple docstring"""
with open(Path(_snake_case ) , encoding='utf-8' ) as open_json_file:
A__ = json.loads(open_json_file.read() )
A__ = self.parse_dict(_snake_case , allow_extra_keys=_snake_case )
return tuple(_snake_case )
def _a ( self : Tuple , _snake_case : str , _snake_case : bool = False ):
"""simple docstring"""
A__ = self.parse_dict(yaml.safe_load(Path(_snake_case ).read_text() ) , allow_extra_keys=_snake_case )
return tuple(_snake_case )
| 9 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks( protein : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case : str = []
snake_case : Optional[int] = []
snake_case : str = []
for rt in rc.restypes:
snake_case : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
snake_case : List[str] = {name: i for i, name in enumerate(__A )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
snake_case : List[Any] = torch.tensor(
__A , dtype=torch.intaa , device=protein["""aatype"""].device , )
snake_case : int = torch.tensor(
__A , dtype=torch.intaa , device=protein["""aatype"""].device , )
snake_case : Tuple = torch.tensor(
__A , dtype=torch.floataa , device=protein["""aatype"""].device , )
snake_case : List[str] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
snake_case : Any = restype_atomaa_to_atomaa[protein_aatype]
snake_case : Dict = restype_atomaa_mask[protein_aatype]
snake_case : str = residx_atomaa_mask
snake_case : Optional[int] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
snake_case : Tuple = restype_atomaa_to_atomaa[protein_aatype]
snake_case : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
snake_case : int = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
snake_case : Optional[Any] = rc.restype_atoa[restype_letter]
snake_case : Union[str, Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
snake_case : int = rc.atom_order[atom_name]
snake_case : Union[str, Any] = 1
snake_case : Any = restype_atomaa_mask[protein_aatype]
snake_case : Union[str, Any] = residx_atomaa_mask
return protein
def lowercase ( __A : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
'''simple docstring'''
snake_case : Tuple = tree_map(lambda __A : torch.tensor(__A , device=batch["""aatype"""].device ) , __A , np.ndarray )
snake_case : int = tensor_tree_map(lambda __A : np.array(__A ) , make_atomaa_masks(__A ) )
return out
| 36 |
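The function above builds per-residue-type index tables once (21 residue types covering the 20 amino acids plus "UNK", in the atom14/atom37 layouts) and then expands them to per-position tensors by fancy-indexing with the integer `aatype` tensor. A hedged sketch of that gather pattern, with dummy values standing in for the real residue constants:

import torch

num_restypes, atoms_per_res = 21, 14          # 20 amino acids + "UNK"
table = torch.arange(num_restypes * atoms_per_res).view(num_restypes, atoms_per_res)
aatype = torch.tensor([0, 7, 20])             # one residue-type id per sequence position
per_residue = table[aatype]                   # gather: shape (seq_len, 14)
print(per_residue.shape)                      # torch.Size([3, 14])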
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> List[Any]:
print('Loading config file...' )
def flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase="." ):
A__ = []
for k, v in d.items():
A__ = parent_key + sep + k if parent_key else k
if isinstance(__UpperCamelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase , sep=__UpperCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(__UpperCamelCase )
A__ = argparse.Namespace()
with open(__UpperCamelCase , 'r' ) as yaml_file:
try:
A__ = yaml.load(__UpperCamelCase , Loader=yaml.FullLoader )
A__ = flatten_yaml_as_dict(__UpperCamelCase )
for k, v in flat_cfg.items():
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase , str(__UpperCamelCase ) ) )
return config
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
A__ = MobileViTVaConfig()
A__ = False
# dataset
if task_name.startswith('imagenet1k_' ):
A__ = 1_000
if int(task_name.strip().split('_' )[-1] ) == 384:
A__ = 384
else:
A__ = 256
A__ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
A__ = 21_000
if int(task_name.strip().split('_' )[-1] ) == 384:
A__ = 384
else:
A__ = 256
A__ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
A__ = 151
A__ = 512
A__ = 'ade20k-id2label.json'
A__ = True
elif task_name.startswith('voc_' ):
A__ = 21
A__ = 512
A__ = 'pascal-voc-id2label.json'
A__ = True
# orig_config
A__ = load_orig_config_file(__UpperCamelCase )
assert getattr(__UpperCamelCase , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
A__ = getattr(__UpperCamelCase , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(__UpperCamelCase , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
A__ = getattr(__UpperCamelCase , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
A__ = getattr(__UpperCamelCase , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
A__ = getattr(__UpperCamelCase , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
A__ = getattr(__UpperCamelCase , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
A__ = getattr(__UpperCamelCase , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
A__ = 'huggingface/label-files'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( __UpperCamelCase , __UpperCamelCase=False ) -> Dict:
if base_model:
A__ = ''
else:
A__ = 'mobilevitv2.'
A__ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
A__ = k[8:]
else:
A__ = k
if ".block." in k:
A__ = k_new.replace('.block.' , '.' )
if ".conv." in k:
A__ = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
A__ = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
A__ = k_new.replace('conv_1.' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
A__ = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
A__ = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
A__ = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
A__ = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
A__ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
A__ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
A__ = [0, 1]
elif i == 4:
A__ = [0, 1, 2, 3]
elif i == 5:
A__ = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
A__ = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
A__ = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
A__ = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
A__ = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
A__ = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
A__ = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
A__ = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
A__ = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
A__ = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
A__ = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
A__ = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
A__ = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def A ( __UpperCamelCase ) -> Tuple:
A__ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> str:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
A__ = get_mobilevitva_config(__UpperCamelCase , __UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
A__ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
A__ = False
else:
A__ = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
A__ = False
    # remove and rename some keys of the loaded original model
A__ = checkpoint
remove_unused_keys(__UpperCamelCase )
A__ = create_rename_keys(__UpperCamelCase , base_model=__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load modified state_dict
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
A__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
# verify classification model
if task_name.startswith('imagenet' ):
A__ = outputs.logits
A__ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
A__ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
        '''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 9 | 0 |
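The conversion script above follows the usual checkpoint-porting recipe: flatten the original YAML config, compute a list of (old_key, new_key) pairs, rewrite the state dict, then verify logits on a test image. A minimal sketch of just the key-renaming step, using illustrative substrings rather than the real MobileViTV2 mapping:

# toy state dict; keys and values are placeholders
state_dict = {"encoder.conv_1.weight": 1, "encoder.layer_1.block.norm.bias": 2}

rename_pairs = []
for k in state_dict:
    k_new = k[8:] if k.startswith("encoder.") else k
    k_new = k_new.replace(".block.", ".").replace(".norm.", ".normalization.")
    rename_pairs.append((k, k_new))

for old, new in rename_pairs:
    state_dict[new] = state_dict.pop(old)  # move each tensor under its new key

print(sorted(state_dict))  # ['conv_1.weight', 'layer_1.normalization.bias']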
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'umt5'
_lowercase = ['past_key_values']
def __init__( self : Union[str, Any] , lowerCamelCase__ : List[Any]=250_112 , lowerCamelCase__ : str=512 , lowerCamelCase__ : Optional[Any]=64 , lowerCamelCase__ : List[Any]=1_024 , lowerCamelCase__ : int=8 , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[Any]=6 , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=128 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[Any]=1E-6 , lowerCamelCase__ : Union[str, Any]=1.0 , lowerCamelCase__ : Union[str, Any]="gated-gelu" , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any="T5Tokenizer" , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : List[Any]=1 , lowerCamelCase__ : List[str]=0 , **lowerCamelCase__ : Any , ):
super().__init__(
is_encoder_decoder=lowerCamelCase__ , tokenizer_class=lowerCamelCase__ , tie_word_embeddings=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : Dict = vocab_size
a__ : List[Any] = d_model
a__ : Optional[Any] = d_kv
a__ : Optional[int] = d_ff
a__ : List[Any] = num_layers
a__ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a__ : str = num_heads
a__ : Optional[int] = relative_attention_num_buckets
a__ : Any = relative_attention_max_distance
a__ : int = dropout_rate
a__ : Union[str, Any] = layer_norm_epsilon
a__ : Optional[int] = initializer_factor
a__ : Optional[int] = feed_forward_proj
a__ : Tuple = use_cache
a__ : List[Any] = self.feed_forward_proj.split("-" )
a__ : Optional[int] = act_info[-1]
a__ : Union[str, Any] = act_info[0] == "gated"
if len(lowerCamelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
a__ : Optional[int] = "gelu_new"
@property
def _UpperCamelCase( self : int ):
return self.d_model
@property
def _UpperCamelCase( self : int ):
return self.num_heads
@property
def _UpperCamelCase( self : Dict ):
return self.num_layers
class A__ ( A__ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _UpperCamelCase( self : Optional[Any] ):
a__ : Dict = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
a__ : Tuple = "past_encoder_sequence + sequence"
a__ : Dict = {0: "batch"}
a__ : Union[str, Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
a__ : Tuple = {0: "batch", 1: "decoder_sequence"}
a__ : List[str] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _UpperCamelCase( self : int ):
return 13
@property
def _UpperCamelCase( self : List[str] ):
return 5E-4
| 37 |
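One detail worth noting in the config above is how `feed_forward_proj` strings such as "gated-gelu" are decomposed into a gating flag plus an activation name, with "gated-gelu" further aliased to "gelu_new". A standalone sketch of that parsing convention (the function name is illustrative):

def parse_ff_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    if len(act_info) > 2 or (len(act_info) == 2 and act_info[0] != "gated"):
        raise ValueError(f"{feed_forward_proj!r} is not a valid activation spec")
    is_gated = act_info[0] == "gated"
    act_fn = act_info[-1]
    if feed_forward_proj == "gated-gelu":
        act_fn = "gelu_new"  # alias used by the config above
    return is_gated, act_fn

print(parse_ff_proj("gated-gelu"))  # (True, 'gelu_new')
print(parse_ff_proj("relu"))        # (False, 'relu')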
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE__ = '''docs/source/en/_toctree.yml'''
def A ( __UpperCamelCase ) -> Optional[Any]:
A__ = defaultdict(__UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(__UpperCamelCase , key=lambda s : s["title"].lower() )
def A ( __UpperCamelCase=False ) -> str:
with open(__UpperCamelCase , encoding='utf-8' ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]['sections']
# Then to the model doc
A__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
A__ = api_doc[model_idx]['sections']
A__ = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if 'sections' in section]
A__ = False
for idx, modality_doc in modalities_docs:
A__ = modality_doc['sections']
A__ = clean_model_doc_toc(__UpperCamelCase )
if old_modality_doc != new_modality_doc:
A__ = True
if overwrite:
A__ = new_modality_doc
if diff:
if overwrite:
A__ = model_doc
A__ = api_doc
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 9 | 0 |
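The checker above relies on a simple pattern: count each `local` key with a `defaultdict`, collapse duplicates to a single entry, and sort by lower-cased title. A compact, runnable sketch of that de-duplication step on toy data (unlike the original, it takes the first title instead of raising on conflicting titles):

from collections import defaultdict

docs = [
    {"local": "bert", "title": "BERT"},
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
]
counts = defaultdict(int)
for doc in docs:
    counts[doc["local"]] += 1

deduped = [{"local": k, "title": next(d["title"] for d in docs if d["local"] == k)}
           for k, v in counts.items() if v > 1]
deduped.extend(d for d in docs if counts[d["local"]] == 1)
print(sorted(deduped, key=lambda s: s["title"].lower()))
# [{'local': 'albert', 'title': 'ALBERT'}, {'local': 'bert', 'title': 'BERT'}]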
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A_ : int = threading.Lock()
A_ : Optional[logging.Handler] = None
A_ : str = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ : Dict = logging.WARNING
A_ : Tuple = True
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Dict = os.getenv("""TRANSFORMERS_VERBOSITY""" , __magic_name__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
return __name__.split(""".""" )[0]
def UpperCamelCase__ ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
snake_case__ : int = logging.StreamHandler() # Set sys.stderr as stream.
snake_case__ : Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
snake_case__ : Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
snake_case__ : List[str] = False
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
snake_case__ : Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
snake_case__ : str = None
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
return log_levels
def UpperCamelCase__ ( __magic_name__ : Optional[str] = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
snake_case__ : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__magic_name__ )
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase__ ( __magic_name__ : int ) -> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(__magic_name__ )
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCamelCase__ ( __magic_name__ : logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
snake_case__ : List[str] = False
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
snake_case__ : str = True
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
snake_case__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
snake_case__ : Optional[int] = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
snake_case__ : Any = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__magic_name__ )
def UpperCamelCase__ ( self : List[Any] , *__magic_name__ : str , **__magic_name__ : List[str] ) -> Any:
'''simple docstring'''
snake_case__ : Dict = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , __magic_name__ )
if no_advisory_warnings:
return
self.warning(*__magic_name__ , **__magic_name__ )
A_ : List[str] = warning_advice
@functools.lru_cache(__magic_name__ )
def UpperCamelCase__ ( self : Optional[int] , *__magic_name__ : int , **__magic_name__ : List[Any] ) -> Dict:
'''simple docstring'''
self.warning(*__magic_name__ , **__magic_name__ )
A_ : Dict = warning_once
class __snake_case :
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
snake_case__ : List[str] = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , __SCREAMING_SNAKE_CASE ):
def empty_fn(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return
class __snake_case :
'''simple docstring'''
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if _tqdm_active:
return tqdm_lib.tqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
else:
return EmptyTqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
snake_case__ : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A_ : List[str] = _tqdm_cls()
def UpperCamelCase__ ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
global _tqdm_active
snake_case__ : List[str] = True
hf_hub_utils.enable_progress_bars()
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
global _tqdm_active
snake_case__ : List[str] = False
hf_hub_utils.disable_progress_bars()
| 38 |
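The module above lazily attaches a single stderr StreamHandler to the library's root logger, so every `logging.getLogger("<lib>.<module>")` child inherits one configuration point. A pared-down sketch of that setup, assuming a placeholder library name `mylib` and omitting the thread lock and environment-variable handling from the original:

import logging
import sys

_handler = None

def _configure_root(name: str = "mylib") -> logging.Logger:
    global _handler
    root = logging.getLogger(name)
    if _handler is None:
        _handler = logging.StreamHandler(sys.stderr)  # attach exactly once
        root.addHandler(_handler)
        root.setLevel(logging.WARNING)
        root.propagate = False  # keep records out of the absolute root logger
    return root

_configure_root()
log = logging.getLogger("mylib.module")  # child logger, inherits the handler
log.warning("visible at the default WARNING verbosity")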
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_encoder_blocks' ) )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , _snake_case : str , _snake_case : Union[str, Any]=13 , _snake_case : Any=64 , _snake_case : Optional[Any]=3 , _snake_case : Dict=4 , _snake_case : Tuple=[2, 2, 2, 2] , _snake_case : str=[8, 4, 2, 1] , _snake_case : Union[str, Any]=[16, 32, 64, 1_28] , _snake_case : int=[1, 4, 8, 16] , _snake_case : List[str]=[1, 2, 4, 8] , _snake_case : int=True , _snake_case : int=True , _snake_case : Union[str, Any]="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=0.02 , _snake_case : Tuple=3 , _snake_case : int=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def _a ( self : int ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : int ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self : int , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Any ):
"""simple docstring"""
A__ = SegformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _a ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = 1
A__ = SegformerForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Optional[Any] = True
A__ : str = False
A__ : Tuple = False
A__ : Dict = False
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self , config_class=_snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(_snake_case )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Dict , _snake_case : int , _snake_case : List[Any] ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> str:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Dict ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) )
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _snake_case )
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , _snake_case )
| 9 | 0 |
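A recurring invariant in the attention-shape tests above is the SegFormer spatial-reduction arithmetic: each stage's keys/values are downsampled by its `sr_ratio`, so the expected query and key sequence lengths follow directly from the image size. Spelled out with the tester's default values:

image_size = 64                 # tester default above
sr_ratios = [8, 4, 2, 1]        # spatial-reduction ratio per encoder stage

# the first stage runs at stride 4, the last stage at stride 32
first_seq = (image_size // 4) ** 2
first_reduced = (image_size // (4 * sr_ratios[0])) ** 2
last_seq = (image_size // 32) ** 2
last_reduced = (image_size // (32 * sr_ratios[-1])) ** 2
print(first_seq, first_reduced)  # 256 4
print(last_seq, last_reduced)    # 4 4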
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "geglu" , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : str = "layer_norm" , _UpperCamelCase : bool = False , ) ->Optional[int]:
super().__init__()
snake_case_ = only_cross_attention
snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case_ = AdaLayerNorm(_UpperCamelCase , _UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ = AdaLayerNormZero(_UpperCamelCase , _UpperCamelCase )
else:
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
snake_case_ = Attention(
query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_UpperCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case_ = (
AdaLayerNorm(_UpperCamelCase , _UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
)
snake_case_ = Attention(
query_dim=_UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , upcast_attention=_UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
snake_case_ = None
snake_case_ = None
# 3. Feed-forward
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
snake_case_ = FeedForward(_UpperCamelCase , dropout=_UpperCamelCase , activation_fn=_UpperCamelCase , final_dropout=_UpperCamelCase )
# let chunk size default to None
snake_case_ = None
snake_case_ = 0
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->str:
# Sets chunk feed-forward
snake_case_ = chunk_size
snake_case_ = dim
def snake_case__( self : List[str] , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , _UpperCamelCase : Dict[str, Any] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , ) ->str:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case_ = self.norma(_UpperCamelCase , _UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = self.norma(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hidden_dtype=hidden_states.dtype )
else:
snake_case_ = self.norma(_UpperCamelCase )
snake_case_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case_ = self.attna(
_UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
if self.use_ada_layer_norm_zero:
snake_case_ = gate_msa.unsqueeze(1 ) * attn_output
snake_case_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case_ = (
self.norma(_UpperCamelCase , _UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
)
snake_case_ = self.attna(
_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = attn_output + hidden_states
# 3. Feed-forward
snake_case_ = self.norma(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case_ = torch.cat(
[self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case_ = self.ff(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ = gate_mlp.unsqueeze(1 ) * ff_output
snake_case_ = ff_output + hidden_states
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : str = "geglu" , _UpperCamelCase : bool = False , ) ->str:
super().__init__()
snake_case_ = int(dim * mult )
snake_case_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase )
if activation_fn == "gelu-approximate":
snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
snake_case_ = GEGLU(_UpperCamelCase , _UpperCamelCase )
elif activation_fn == "geglu-approximate":
snake_case_ = ApproximateGELU(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.ModuleList([] )
# project in
self.net.append(_UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(_UpperCamelCase ) )
# project out
self.net.append(nn.Linear(_UpperCamelCase , _UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc., has a final dropout
if final_dropout:
self.net.append(nn.Dropout(_UpperCamelCase ) )
def snake_case__( self : Any , _UpperCamelCase : Optional[Any] ) ->int:
for module in self.net:
snake_case_ = module(_UpperCamelCase )
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str = "none" ) ->Any:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase )
snake_case_ = approximate
def snake_case__( self : List[str] , _UpperCamelCase : Tuple ) ->List[Any]:
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] ) ->List[str]:
snake_case_ = self.proj(_UpperCamelCase )
snake_case_ = self.gelu(_UpperCamelCase )
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : int ) ->Union[str, Any]:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , dim_out * 2 )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[str] ) ->str:
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Optional[int]:
snake_case_, snake_case_ = self.proj(_UpperCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_UpperCamelCase )
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) ->Optional[Any]:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] , _UpperCamelCase : int ) ->Any:
snake_case_ = self.proj(_UpperCamelCase )
return x * torch.sigmoid(1.702 * x )
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] ) ->List[Any]:
super().__init__()
snake_case_ = nn.Embedding(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.SiLU()
snake_case_ = nn.Linear(_UpperCamelCase , embedding_dim * 2 )
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
def snake_case__( self : Dict , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Union[str, Any]:
snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase ) ) )
snake_case_, snake_case_ = torch.chunk(_UpperCamelCase , 2 )
snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale) + shift
return x
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , _UpperCamelCase : int , _UpperCamelCase : int ) ->str:
super().__init__()
snake_case_ = CombinedTimestepLabelEmbeddings(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.SiLU()
snake_case_ = nn.Linear(_UpperCamelCase , 6 * embedding_dim , bias=_UpperCamelCase )
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase , eps=1e-6 )
def snake_case__( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Dict=None ) ->Optional[int]:
snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase , _UpperCamelCase , hidden_dtype=_UpperCamelCase ) ) )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = emb.chunk(6 , dim=1 )
snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : float = 1e-5 ) ->Optional[Any]:
super().__init__()
snake_case_ = num_groups
snake_case_ = eps
if act_fn is None:
snake_case_ = None
else:
snake_case_ = get_activation(_UpperCamelCase )
snake_case_ = nn.Linear(_UpperCamelCase , out_dim * 2 )
def snake_case__( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]:
if self.act:
snake_case_ = self.act(_UpperCamelCase )
snake_case_ = self.linear(_UpperCamelCase )
snake_case_ = emb[:, :, None, None]
snake_case_, snake_case_ = emb.chunk(2 , dim=1 )
snake_case_ = F.group_norm(_UpperCamelCase , self.num_groups , eps=self.eps )
snake_case_ = x * (1 + scale) + shift
return x
| 39 |
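The feed-forward variants defined above all share one shape contract; GEGLU, for instance, projects to twice the output width, splits in half, and gates one half with GELU of the other. A self-contained sketch of just that piece (`TinyGEGLU` is an illustrative name, and the mps float16 workaround from the original is omitted):

import torch
import torch.nn.functional as F
from torch import nn

class TinyGEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)  # double width for the gate

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)

print(TinyGEGLU(8, 16)(torch.randn(2, 8)).shape)  # torch.Size([2, 16])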
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( __UpperCamelCase ) -> Optional[int]:
A__ = filter(lambda __UpperCamelCase : p.requires_grad , model.parameters() )
A__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
if metric == "rouge2":
A__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
A__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
A__ = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
A__ = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'''
' function.' )
A__ = ModelCheckpoint(
dirpath=__UpperCamelCase , filename=__UpperCamelCase , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( __UpperCamelCase , __UpperCamelCase ) -> Any:
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=__UpperCamelCase , verbose=__UpperCamelCase , )
class __lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _a ( self : Dict , _snake_case : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_snake_case )
@rank_zero_only
def _a ( self : Union[str, Any] , _snake_case : pl.Trainer , _snake_case : pl.LightningModule , _snake_case : str , _snake_case : Optional[Any]=True ):
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / 'test_results.txt'
A__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
A__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_snake_case )
generations_file.parent.mkdir(exist_ok=_snake_case )
with open(_snake_case , 'a+' ) as writer:
for key in sorted(_snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(_snake_case , torch.Tensor ):
A__ = val.item()
A__ = F'''{key}: {val:.6f}\n'''
writer.write(_snake_case )
if not save_generations:
return
if "preds" in metrics:
A__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_snake_case )
@rank_zero_only
def _a ( self : Dict , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _a ( self : int , _snake_case : pl.Trainer , _snake_case : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_snake_case , _snake_case , 'test' )
@rank_zero_only
def _a ( self : Optional[Any] , _snake_case : pl.Trainer , _snake_case : List[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 9 | 0 |
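The checkpoint callback above maps each validation metric to a filename template that PyTorch Lightning later fills in with logged values. The mapping itself is plain string templating; a dependency-free sketch (`filename_for` is a hypothetical helper name):

def filename_for(metric: str) -> str:
    templates = {
        "rouge2": "{val_avg_rouge2:.4f}-{step_count}",
        "bleu": "{val_avg_bleu:.4f}-{step_count}",
        "em": "{val_avg_em:.4f}-{step_count}",
        "loss": "{val_avg_loss:.4f}-{step_count}",
    }
    if metric not in templates:
        raise NotImplementedError(f"unsupported metric: {metric}")
    return templates[metric]

print(filename_for("bleu").format(val_avg_bleu=31.4159, step_count=1000))
# 31.4159-1000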
from itertools import count
def solution( snake_case__ : int = 50 ) -> int:
    fill_count_functions = [1] * snake_case__
for n in count(snake_case__ ):
fill_count_functions.append(1 )
for block_length in range(snake_case__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 40 |
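The recurrence above can be sanity-checked against the worked example from Project Euler 114/115: with a minimum block length of 3, a row of length 7 admits exactly 17 fills. A small standalone check of the same recurrence (`fill_counts` is a hypothetical helper name):

def fill_counts(min_block_length: int, max_n: int) -> list[int]:
    fills = [1] * min_block_length  # rows shorter than a block admit only the empty fill
    for n in range(min_block_length, max_n + 1):
        fills.append(1)  # the all-black fill
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fills[n] += fills[n - block_start - block_length - 1]
            fills[n] += 1
    return fills

print(fill_counts(3, 7)[7])  # 17, matching the Euler 114 statement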
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
A__ = np.array(_snake_case , np.intaa )
A__ = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A__ = padding_value
normed_input_values.append(_snake_case )
else:
A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _a ( self : Tuple , _snake_case : np.ndarray , ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : List[str] , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[int] = None , **_snake_case : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
else:
A__ = None
if audio_target is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
if inputs is None:
return inputs_target
else:
A__ = inputs_target['input_values']
A__ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def _a ( self : Tuple , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = False , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = isinstance(_snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
A__ = speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [speech]
# needed to make pad() work on spectrogram inputs
A__ = self.feature_size
# convert into correct format for padding
if is_target:
A__ = [self._extract_mel_features(_snake_case ) for waveform in speech]
A__ = BatchFeature({'input_values': features} )
A__ = self.num_mel_bins
else:
A__ = BatchFeature({'input_values': speech} )
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = feature_size_hack
# convert input values to correct format
A__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
A__ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
A__ = input_values.astype(np.floataa )
# convert attention_mask to correct format
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A__ = (
attention_mask
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_snake_case , padding_value=self.padding_value )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 9 | 0 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
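        """
        Compute the circular convolution of the two signals above with the
        matrix method: each matrix row is a rotation of the second signal, so
        matrix^T @ first_signal equals the circular convolution. With the
        default signals the result works out to [10, 10, 6, 14].
        """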
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal ,length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
matrix[i][j] += item
# multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) ,np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 41 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = OmegaConf.load(__UpperCamelCase )
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
A__ = list(state_dict.keys() )
# extract state_dict for VQVAE
A__ = {}
A__ = 'first_stage_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
# extract state_dict for UNetLDM
A__ = {}
A__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
A__ = config.model.params.first_stage_config.params
A__ = config.model.params.unet_config.params
A__ = VQModel(**__UpperCamelCase ).eval()
vqvae.load_state_dict(__UpperCamelCase )
A__ = UNetLDMModel(**__UpperCamelCase ).eval()
unet.load_state_dict(__UpperCamelCase )
A__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__UpperCamelCase , )
A__ = LDMPipeline(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
pipeline.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
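# Example invocation of the conversion script above (the script name and all
# paths are placeholders for illustration, not real files):
#
#   python conversion_ldm_uncond.py \
#       --checkpoint_path /path/to/ldm.ckpt \
#       --config_path /path/to/config.yaml \
#       --output_path ./ldm-pipeline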
| 9 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = None
class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = PandasConfig
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCamelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ):
lowerCamelCase_ = data_files
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
lowerCamelCase_ = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'files': files} ) )
return splits
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ = table_cast(SCREAMING_SNAKE_CASE_ , self.config.features.arrow_schema )
return pa_table
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ):
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
lowerCamelCase_ = pa.Table.from_pandas(pd.read_pickle(SCREAMING_SNAKE_CASE_ ) )
yield i, self._cast_table(SCREAMING_SNAKE_CASE_ )
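# A minimal usage sketch for the builder above, assuming it backs the
# "pandas" packaged module in `datasets` (the pickle file name is a
# placeholder):
#
#   import pandas as pd
#   from datasets import load_dataset
#
#   pd.DataFrame({'text': ['a', 'b']}).to_pickle('frame.pkl')
#   ds = load_dataset('pandas', data_files='frame.pkl', split='train')
#   print(ds[0])  # {'text': 'a'}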
| 42 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def A ( __UpperCamelCase ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
A__ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
A__ = model.state_dict()
A__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> List[str]:
A__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
A__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
A__ = model
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
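# The two converters above remap checkpoints by zipping the source and target
# state dict key orders, which silently assumes both dicts enumerate modules
# in the same order. A self-contained toy illustration of that trick:
#
#   old_state = {'w1': 1, 'w2': 2}
#   new_keys = ['weight_a', 'weight_b']
#   mapping = dict(zip(old_state.keys(), new_keys))
#   remapped = {mapping[k]: v for k, v in old_state.items()}
#   assert remapped == {'weight_a': 1, 'weight_b': 2}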
| 9 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = '''stabilityai/stable-diffusion-2'''
lowercase__ , lowercase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = scheduler_params
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
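# The tests above use the standard jax.pmap data layout: `replicate` copies
# the params to every device, while `shard` splits the batch's leading axis
# per device. A minimal sketch of the shapes involved (assumes JAX and Flax
# are installed and at least one device is visible):
#
#   import jax
#   import jax.numpy as jnp
#   from flax.jax_utils import replicate
#   from flax.training.common_utils import shard
#
#   n = jax.device_count()
#   batch = jnp.zeros((n * 2, 4))
#   print(shard(batch).shape)                         # (n, 2, 4)
#   print(replicate({'w': jnp.ones(4)})['w'].shape)   # (n, 4)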
| 43 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = KandinskyVaaControlnetImgaImgPipeline
lowerCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase_ = False
@property
def lowerCamelCase_ ( self : Any ):
return 3_2
@property
def lowerCamelCase_ ( self : List[str] ):
return 3_2
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Any ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
return 1_0_0
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_lowerCamelCase : Any = UNetaDConditionModel(**__A )
return model
@property
def lowerCamelCase_ ( self : Any ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = self.dummy_unet
_lowerCamelCase : int = self.dummy_movq
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__A )
_lowerCamelCase : Dict = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : int=0 ):
_lowerCamelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__A )
# create init_image
_lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Optional[Any] = image.cpu().permute(0,2,3,1 )[0]
_lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create hint
_lowerCamelCase : str = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("mps" ):
_lowerCamelCase : int = torch.manual_seed(__A )
else:
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : Optional[Any] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = "cpu"
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : Any = self.pipeline_class(**__A )
_lowerCamelCase : Optional[int] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(__A ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Optional[Any] = pipe(
**self.get_dummy_inputs(__A ),return_dict=__A,)[0]
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : Any = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
_lowerCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_lowerCamelCase : Any = init_image.resize((5_1_2, 5_1_2) )
_lowerCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
_lowerCamelCase : List[str] = torch.from_numpy(np.array(__A ) ).float() / 255.0
_lowerCamelCase : Union[str, Any] = hint.permute(2,0,1 ).unsqueeze(0 )
_lowerCamelCase : int = "A robot, 4k photo"
_lowerCamelCase : Optional[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior",torch_dtype=torch.floataa )
pipe_prior.to(__A )
_lowerCamelCase : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth",torch_dtype=torch.floataa )
_lowerCamelCase : Optional[int] = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_lowerCamelCase , _lowerCamelCase : List[Any] = pipe_prior(
__A,image=__A,strength=0.85,generator=__A,negative_prompt="",).to_tuple()
_lowerCamelCase : Any = pipeline(
image=__A,image_embeds=__A,negative_image_embeds=__A,hint=__A,generator=__A,num_inference_steps=1_0_0,height=5_1_2,width=5_1_2,strength=0.5,output_type="np",)
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(__A,__A )
| 44 |
from __future__ import annotations
from typing import Any
def A ( postfix_notation ) -> int:
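    """
    Evaluate a postfix (Reverse Polish) expression given as string tokens.
    Division truncates toward zero, matching the branch below.

    >>> A(['2', '1', '+', '3', '*'])
    9
    >>> A(['15', '5', '/'])
    3
    >>> A([])
    0
    """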
if not postfix_notation:
return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = ["""input_features""", """attention_mask"""]
def __init__( self :int , lowerCamelCase__ :Optional[int]=80 , lowerCamelCase__ :Any=1_60_00 , lowerCamelCase__ :Union[str, Any]=80 , lowerCamelCase__ :List[Any]=0.0 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :str=True , **lowerCamelCase__ :str , ):
super().__init__(feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = num_mel_bins
UpperCamelCase__ :Dict = do_ceptral_normalize
UpperCamelCase__ :Optional[Any] = normalize_means
UpperCamelCase__ :Optional[int] = normalize_vars
UpperCamelCase__ :List[Any] = True
def __a ( self :Any , lowerCamelCase__ :np.ndarray , ):
UpperCamelCase__ :Union[str, Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCamelCase__ :str = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 )
UpperCamelCase__ :Tuple = ta_kaldi.fbank(lowerCamelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __a ( lowerCamelCase__ :np.ndarray , lowerCamelCase__ :int , lowerCamelCase__ :Optional[bool] = True , lowerCamelCase__ :Optional[bool] = True , lowerCamelCase__ :float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
UpperCamelCase__ :Any = x[:input_length].mean(axis=0 )
UpperCamelCase__ :int = np.subtract(lowerCamelCase__ , lowerCamelCase__ )
if normalize_vars:
UpperCamelCase__ :str = x[:input_length].std(axis=0 )
UpperCamelCase__ :str = np.divide(lowerCamelCase__ , lowerCamelCase__ )
if input_length < x.shape[0]:
UpperCamelCase__ :List[str] = padding_value
# make sure array is in float32
UpperCamelCase__ :Optional[int] = x.astype(np.floataa )
return x
def __a ( self :Optional[Any] , lowerCamelCase__ :List[np.ndarray] , lowerCamelCase__ :Optional[np.ndarray] = None ):
UpperCamelCase__ :List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCamelCase__ , lowerCamelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCamelCase__ , lowerCamelCase__ )
]
def __call__( self :List[Any] , lowerCamelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :bool = False , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[Union[str, TensorType]] = None , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[bool] = None , **lowerCamelCase__ :Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCamelCase__ :Union[str, Any] = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase__ :Union[str, Any] = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase__ :List[str] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
UpperCamelCase__ :str = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase__ :Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase__ :List[Any] = [raw_speech]
# extract fbank features
UpperCamelCase__ :Dict = [self._extract_fbank_features(lowerCamelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase__ :Optional[Any] = BatchFeature({"""input_features""": features} )
UpperCamelCase__ :List[Any] = self.pad(
lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
# make sure list is in array format
UpperCamelCase__ :List[str] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , lowerCamelCase__ ):
UpperCamelCase__ :int = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase__ :Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCamelCase__ :List[str] = [np.asarray(lowerCamelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCamelCase__ :int = (
np.array(lowerCamelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase__ , max_length=lowerCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase__ :int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=lowerCamelCase__ )
if return_tensors is not None:
UpperCamelCase__ :List[str] = padded_inputs.convert_to_tensors(lowerCamelCase__ )
        return padded_inputs
| 45 |
from __future__ import annotations
def A ( __UpperCamelCase = 4 ) -> list[list[int]]:
    row_size = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*__UpperCamelCase )]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
    matrix = __UpperCamelCase[::-1]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
    matrix = [x[::-1] for x in __UpperCamelCase]
return matrix
def A ( __UpperCamelCase ) -> None:
    for i in __UpperCamelCase:
        print(*i )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 9 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
_lowerCamelCase : Any = emb.weight.data
return lin_layer
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Any:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {}
for old_key in state_dict.keys():
_lowerCamelCase : List[str] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_lowerCamelCase : Any = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
else:
_lowerCamelCase : Optional[Any] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
_lowerCamelCase : int = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
_lowerCamelCase : Tuple = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
_lowerCamelCase : Union[str, Any] = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
_lowerCamelCase : Union[str, Any] = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
_lowerCamelCase : int = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
_lowerCamelCase : Tuple = key.replace("final_layer_norm" , "ff_layer_norm" )
_lowerCamelCase : List[str] = state_dict[old_key]
return new_dict
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
_lowerCamelCase : List[Any] = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
_lowerCamelCase : Dict = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(_lowerCamelCase ):
_lowerCamelCase : List[str] = torch.load(_lowerCamelCase )["model"]
remove_ignore_keys_(_lowerCamelCase )
_lowerCamelCase : Any = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Tuple = os.path.join(
_lowerCamelCase , weights_name.replace(".bin" , F"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , F"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
_lowerCamelCase : List[str] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_lowerCamelCase )
_lowerCamelCase : int = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
_lowerCamelCase : Optional[int] = {}
for idx, shard in enumerate(_lowerCamelCase ):
_lowerCamelCase : int = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin""" )
_lowerCamelCase : str = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
_lowerCamelCase : Optional[Any] = shard_file
# Add the metadata
_lowerCamelCase : Union[str, Any] = {"total_size": total_size}
_lowerCamelCase : Optional[Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + "\n"
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase , _lowerCAmelCase : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCAmelCase : List[Any] = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCAmelCase : Union[str, Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
    model.save_pretrained(args.pytorch_dump_folder_path)
| 46 |
from __future__ import annotations
from fractions import Fraction
def A ( __UpperCamelCase , __UpperCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A ( __UpperCamelCase ) -> list[str]:
A__ = []
A__ = 11
A__ = int('1' + '0' * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
A__ = 10
return solutions
def A ( __UpperCamelCase = 2 ) -> int:
A__ = 1.0
for fraction in fraction_list(__UpperCamelCase ):
A__ = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 9 | 0 |
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_024 )
            if not data:
                break
            out_file.write(data )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
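# A minimal matching sender for the client above (a sketch, not part of the
# original file; the file name is a placeholder): it listens on the same
# port, reads the greeting, and streams a file back until EOF.
#
#   import socket
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12_312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(1_024))  # b'Hello server!'
#   with open('file_to_send', 'rb') as in_file:
#       while chunk := in_file.read(1_024):
#           conn.send(chunk)
#   conn.close()
#   server.close()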
| 47 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
for char in word:
lowerCAmelCase__ = ord(UpperCamelCase_ )
if not _is_chinese_char(UpperCamelCase_ ):
return 0
return 1
def A ( UpperCamelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = set()
for token in tokens:
lowerCAmelCase__ = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ )
if chinese_word:
word_set.add(UpperCamelCase_ )
lowerCAmelCase__ = list(UpperCamelCase_ )
return word_list
def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : set() ) -> str:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowerCAmelCase__ = max([len(UpperCamelCase_ ) for w in chinese_word_set] )
lowerCAmelCase__ = bert_tokens
lowerCAmelCase__ ,lowerCAmelCase__ = 0, len(UpperCamelCase_ )
while start < end:
lowerCAmelCase__ = True
if is_chinese(bert_word[start] ):
lowerCAmelCase__ = min(end - start , UpperCamelCase_ )
for i in range(UpperCamelCase_ , 1 , -1 ):
lowerCAmelCase__ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase__ = "##" + bert_word[j]
lowerCAmelCase__ = start + i
lowerCAmelCase__ = False
break
if single_word:
start += 1
return bert_word
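# Worked example for the alignment above: with bert_word = ['我', '喜', '欢']
# and chinese_word_set = {'喜欢'}, the scan finds '喜欢' starting at index 1
# and rewrites the continuation piece, returning ['我', '喜', '##欢'].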
def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : LTP , UpperCamelCase_ : BertTokenizer ) -> str:
'''simple docstring'''
lowerCAmelCase__ = []
for i in range(0 , len(UpperCamelCase_ ) , 1_00 ):
lowerCAmelCase__ = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["cws"] ).cws
lowerCAmelCase__ = [get_chinese_word(UpperCamelCase_ ) for r in res]
ltp_res.extend(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
lowerCAmelCase__ = []
for i in range(0 , len(UpperCamelCase_ ) , 1_00 ):
lowerCAmelCase__ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=5_12 )
bert_res.extend(res["input_ids"] )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
lowerCAmelCase__ = []
for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = []
for id in input_ids:
lowerCAmelCase__ = bert_tokenizer._convert_id_to_token(UpperCamelCase_ )
input_tokens.append(UpperCamelCase_ )
lowerCAmelCase__ = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCamelCase_ ):
if token[:2] == "##":
lowerCAmelCase__ = token[2:]
# save chinese tokens' pos
if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ):
ref_id.append(UpperCamelCase_ )
ref_ids.append(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
return ref_ids
def A ( UpperCamelCase_ : List[str] ) -> str:
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCAmelCase__ = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase__ = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
lowerCAmelCase__ = [json.dumps(UpperCamelCase_ ) + "\n" for ref in ref_ids]
f.writelines(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
main(args)
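# Example invocation of the script above (the paths shown are the argparse
# defaults; the script file name is a placeholder):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt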
| 48 |
SCREAMING_SNAKE_CASE__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
SCREAMING_SNAKE_CASE__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 9 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = 384
if "tiny" in model_name:
__UpperCAmelCase = [3, 3, 9, 3]
__UpperCAmelCase = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase = [3, 3, 27, 3]
__UpperCAmelCase = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase = [3, 3, 27, 3]
__UpperCAmelCase = [128, 256, 512, 1_024]
__UpperCAmelCase = 512
if "large" in model_name:
__UpperCAmelCase = [3, 3, 27, 3]
__UpperCAmelCase = [192, 384, 768, 1_536]
__UpperCAmelCase = 768
if "xlarge" in model_name:
__UpperCAmelCase = [3, 3, 27, 3]
__UpperCAmelCase = [256, 512, 1_024, 2_048]
__UpperCAmelCase = 1_024
# set label information
__UpperCAmelCase = 150
__UpperCAmelCase = '''huggingface/label-files'''
__UpperCAmelCase = '''ade20k-id2label.json'''
__UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
__UpperCAmelCase = ConvNextConfig(
depths=snake_case_ , hidden_sizes=snake_case_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__UpperCAmelCase = UperNetConfig(
backbone_config=snake_case_ , auxiliary_in_channels=snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Optional[int] , snake_case_ :Any ):
__UpperCAmelCase = dct.pop(snake_case_ )
__UpperCAmelCase = val
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[str] , snake_case_ :List[Any] ):
__UpperCAmelCase = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
__UpperCAmelCase = model_name_to_url[model_name]
__UpperCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' )['''state_dict''']
__UpperCAmelCase = get_upernet_config(snake_case_ )
__UpperCAmelCase = UperNetForSemanticSegmentation(snake_case_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase = state_dict.pop(snake_case_ )
if "bn" in key:
__UpperCAmelCase = key.replace('''bn''' , '''batch_norm''' )
__UpperCAmelCase = val
# rename keys
__UpperCAmelCase = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
# verify on image
__UpperCAmelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('''RGB''' )
__UpperCAmelCase = SegformerImageProcessor()
__UpperCAmelCase = processor(snake_case_ , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
__UpperCAmelCase = model(snake_case_ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowercase : Dict = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
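# A minimal sketch of the rename pattern driving the conversion above: each
# (src, dest) pair pops the tensor out of the checkpoint dict and re-inserts it
# under the new key. The key names below are illustrative assumptions.
def rename_key_sketch(state_dict: dict, src: str, dest: str) -> None:
    state_dict[dest] = state_dict.pop(src)

demo_state = {"backbone.norm0.weight": 1.0}
rename_key_sketch(demo_state, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
assert "backbone.hidden_states_norms.stage1.weight" in demo_state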
| 49 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : Any , **_snake_case : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
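# Hedged usage sketch for the pipeline exercised above; the checkpoint name and
# fixture path are taken straight from the tests and assumed to be available.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
# preds is a list of dicts, each of the form {"score": float, "answer": str}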
| 9 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = ['input_features', 'attention_mask']
def __init__( self ,_lowerCAmelCase=80 ,_lowerCAmelCase=1_60_00 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=10 ,_lowerCAmelCase=25 ,_lowerCAmelCase="hamming_window" ,_lowerCAmelCase=3_2768.0 ,_lowerCAmelCase=0.97 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
super().__init__(feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = feature_size
lowerCamelCase__ = sampling_rate
lowerCamelCase__ = padding_value
lowerCamelCase__ = hop_length
lowerCamelCase__ = win_length
lowerCamelCase__ = frame_signal_scale
lowerCamelCase__ = preemphasis_coeff
lowerCamelCase__ = mel_floor
lowerCamelCase__ = normalize_means
lowerCamelCase__ = normalize_vars
lowerCamelCase__ = win_function
lowerCamelCase__ = return_attention_mask
lowerCamelCase__ = win_length * sampling_rate // 10_00
lowerCamelCase__ = hop_length * sampling_rate // 10_00
lowerCamelCase__ = optimal_fft_length(self.sample_size )
lowerCamelCase__ = (self.n_fft // 2) + 1
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if self.win_function == "hamming_window":
lowerCamelCase__ = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=_lowerCAmelCase )
else:
lowerCamelCase__ = window_function(window_length=self.sample_size ,name=self.win_function )
lowerCamelCase__ = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
lowerCamelCase__ = spectrogram(
one_waveform * self.frame_signal_scale ,window=_lowerCAmelCase ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=_lowerCAmelCase ,preemphasis=self.preemphasis_coeff ,mel_filters=_lowerCAmelCase ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# make sure we normalize float32 arrays
if self.normalize_means:
lowerCamelCase__ = x[:input_length].mean(axis=0 )
lowerCamelCase__ = np.subtract(_lowerCAmelCase ,_lowerCAmelCase )
if self.normalize_vars:
lowerCamelCase__ = x[:input_length].std(axis=0 )
lowerCamelCase__ = np.divide(_lowerCAmelCase ,_lowerCAmelCase )
if input_length < x.shape[0]:
lowerCamelCase__ = padding_value
# make sure array is in float32
lowerCamelCase__ = x.astype(np.floataa )
return x
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_lowerCAmelCase ,_lowerCAmelCase ,self.padding_value ) for x, n in zip(_lowerCAmelCase ,_lowerCAmelCase )]
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase__ = is_batched_numpy or (
isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ):
lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa )
elif isinstance(_lowerCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ = [raw_speech]
# extract fbank features
lowerCamelCase__ = [self._extract_mfsc_features(_lowerCAmelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__ = BatchFeature({"""input_features""": features} )
lowerCamelCase__ = self.pad(
_lowerCAmelCase ,padding=_lowerCAmelCase ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,**_lowerCAmelCase ,)
# make sure list is in array format
lowerCamelCase__ = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,_lowerCAmelCase ):
lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for feature in input_features]
lowerCamelCase__ = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCamelCase__ = (
np.array(_lowerCAmelCase ,dtype=np.intaa )
if self._get_padding_strategies(_lowerCAmelCase ,max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCamelCase__ = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=_lowerCAmelCase )
if return_tensors is not None:
lowerCamelCase__ = padded_inputs.convert_to_tensors(_lowerCAmelCase )
return padded_inputs
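# Minimal sketch of the millisecond-to-sample arithmetic from __init__ above:
# win_length and hop_length are given in ms and scaled by the sampling rate.
# The concrete values are illustrative defaults, not requirements.
sampling_rate_sketch = 16_000
win_length_ms, hop_length_ms = 25, 10
sample_size_sketch = win_length_ms * sampling_rate_sketch // 1_000    # 400 samples per analysis window
sample_stride_sketch = hop_length_ms * sampling_rate_sketch // 1_000  # 160 samples between frames
assert (sample_size_sketch, sample_stride_sketch) == (400, 160)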
| 50 |
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
A__ = _modexpt(__UpperCamelCase , exponent // 2 , __UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__UpperCamelCase , exponent - 1 , __UpperCamelCase )) % modulo_value
def A ( __UpperCamelCase = 1_777 , __UpperCamelCase = 1_855 , __UpperCamelCase = 8 ) -> int:
A__ = base
for _ in range(1 , __UpperCamelCase ):
A__ = _modexpt(__UpperCamelCase , __UpperCamelCase , 10**digits )
return result
if __name__ == "__main__":
print(f'{solution() = }')
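# Equivalent sketch using Python's built-in three-argument pow(), which performs
# the same square-and-multiply modular exponentiation as the recursive helper above.
def modexpt_sketch(base: int, exponent: int, modulo_value: int) -> int:
    return pow(base, exponent, modulo_value)

assert modexpt_sketch(3, 10, 1_000) == (3**10) % 1_000  # keeps only the last 3 digits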
| 9 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Optional[int] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="speech_to_text"
_lowerCamelCase =["past_key_values"]
_lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , a__ : Any=10000 , a__ : Union[str, Any]=12 , a__ : List[str]=2048 , a__ : List[Any]=4 , a__ : Optional[Any]=6 , a__ : Dict=2048 , a__ : Optional[int]=4 , a__ : Dict=0.0 , a__ : List[str]=0.0 , a__ : Dict=True , a__ : str=True , a__ : Tuple="relu" , a__ : str=256 , a__ : Tuple=0.1 , a__ : Tuple=0.0 , a__ : List[str]=0.0 , a__ : Dict=0.02 , a__ : List[Any]=2 , a__ : Union[str, Any]=True , a__ : Optional[Any]=1 , a__ : Tuple=0 , a__ : Optional[int]=2 , a__ : List[str]=6000 , a__ : List[str]=1024 , a__ : Dict=2 , a__ : int=(5, 5) , a__ : Union[str, Any]=1024 , a__ : Optional[Any]=80 , a__ : Tuple=1 , **a__ : Optional[int] , ):
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
UpperCAmelCase = num_conv_layers
UpperCAmelCase = list(a__ )
UpperCAmelCase = conv_channels
UpperCAmelCase = input_feat_per_channel
UpperCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , **a__ , )
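# Sketch of the consistency check enforced in __init__ above: each convolutional
# layer needs exactly one kernel size. The values below are illustrative.
conv_kernel_sizes_sketch, num_conv_layers_sketch = (5, 5), 2
if len(conv_kernel_sizes_sketch) != num_conv_layers_sketch:
    raise ValueError(
        f"len(conv_kernel_sizes) = {len(conv_kernel_sizes_sketch)} must equal "
        f"num_conv_layers = {num_conv_layers_sketch}"
    )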
| 51 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Dict:
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
A__ = 'backbone.' if is_semantic else ''
# queries, keys and values
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A__ = gamma_a
A__ = gamma_a
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( ) -> Dict:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(__UpperCamelCase )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
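# Minimal sketch of the fused-QKV split performed in read_in_q_k_v above: the
# original checkpoint stores a single (3 * hidden, hidden) projection whose row
# blocks are the query, key and value weights. Sizes here are illustrative.
import torch

hidden_size_sketch = 4
in_proj_weight_sketch = torch.randn(3 * hidden_size_sketch, hidden_size_sketch)
query_w = in_proj_weight_sketch[:hidden_size_sketch, :]
key_w = in_proj_weight_sketch[hidden_size_sketch : hidden_size_sketch * 2, :]
value_w = in_proj_weight_sketch[-hidden_size_sketch:, :]
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size_sketch, hidden_size_sketch)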
| 9 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a : Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
__a : List[str] = parent
__a : str = batch_size
__a : Any = num_channels
__a : List[str] = min_resolution
__a : str = max_resolution
__a : Optional[int] = do_resize
__a : Dict = size
__a : Union[str, Any] = do_normalize
__a : Tuple = image_mean
__a : Dict = image_std
__a : str = do_rescale
__a : Optional[int] = rescale_factor
__a : Tuple = do_pad
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if not batched:
__a : Dict = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__a , __a : Union[str, Any] = image.size
else:
__a , __a : Any = image.shape[1], image.shape[2]
if w < h:
__a : List[str] = int(self.size['''shortest_edge'''] * h / w )
__a : int = self.size['''shortest_edge''']
elif w > h:
__a : Dict = self.size['''shortest_edge''']
__a : int = int(self.size['''shortest_edge'''] * w / h )
else:
__a : str = self.size['''shortest_edge''']
__a : str = self.size['''shortest_edge''']
else:
__a : Optional[int] = []
for image in image_inputs:
__a , __a : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : Optional[int] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__a : str = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
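# Standalone sketch of the shortest-edge resize rule that get_expected_values
# reproduces above: rescale so the shorter side equals size["shortest_edge"]
# while preserving the aspect ratio. The dimensions below are illustrative.
def shortest_edge_resize_sketch(h: int, w: int, shortest_edge: int) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize_sketch(480, 640, 18) == (18, 24)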
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Optional[Any] = YolosImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
__a : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__a : List[Any] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : Tuple = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : Any = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
__a , __a : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processings
__a : Dict = self.image_processing_class(**self.image_processor_dict )
__a : Any = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase )
# create random PyTorch tensors
__a : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__a : List[str] = image_processing_a.pad(_UpperCAmelCase , return_tensors='''pt''' )
__a : Dict = image_processing_a(_UpperCAmelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
# prepare image and target
__a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__a : Optional[Any] = json.loads(f.read() )
__a : Optional[Any] = {'''image_id''': 39769, '''annotations''': target}
# encode them
__a : Any = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
__a : List[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
__a : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__a : Union[str, Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
__a : int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
__a : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
__a : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
__a : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
# prepare image, target and masks_path
__a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__a : Optional[Any] = json.loads(f.read() )
__a : int = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
__a : str = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__a : List[str] = YolosImageProcessor(format='''coco_panoptic''' )
__a : Tuple = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
__a : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__a : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
__a : Dict = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__a : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify masks
__a : Any = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase )
# verify orig_size
__a : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
__a : int = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
| 52 |
SCREAMING_SNAKE_CASE__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list[str]:
A__ = set()
# keep track of all the paths to be checked
A__ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
A__ = queue.pop(0 )
# get the last node from the path
A__ = path[-1]
if node not in explored:
A__ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
A__ = list(__UpperCamelCase )
new_path.append(__UpperCamelCase )
queue.append(__UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCamelCase )
# in case there's no path between the 2 nodes
return []
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
A__ = [start]
A__ = set(__UpperCamelCase )
    # Keep tabs on distances from the `start` node.
A__ = {start: 0, target: -1}
while queue:
A__ = queue.pop(0 )
if node == target:
A__ = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCamelCase )
queue.append(__UpperCamelCase )
A__ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
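# Design note: list.pop(0) inside the loops above is O(n) per dequeue; the same
# traversal with collections.deque gets O(1) pops. A minimal sketch, reusing
# the adjacency dict (`demo_graph`) from the __main__ block above:
from collections import deque

def bfs_order_sketch(graph: dict, start: str) -> list:
    queue, seen, order = deque([start]), {start}, []
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in graph.get(node, []):
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append(neighbour)
    return order

assert bfs_order_sketch(demo_graph, "G")[0] == "G"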
| 9 | 0 |
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : int ):
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 53 |
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = 0
A__ = len(__UpperCamelCase ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__UpperCamelCase ):
return None
A__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A__ = left
A__ = point
elif point > right:
A__ = right
A__ = point
else:
if item < current_item:
A__ = point - 1
else:
A__ = point + 1
return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__UpperCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif point > right:
return interpolation_search_by_recursion(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
__UpperCamelCase , __UpperCamelCase , point + 1 , __UpperCamelCase )
def A ( __UpperCamelCase ) -> List[str]:
if collection != sorted(__UpperCamelCase ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
SCREAMING_SNAKE_CASE__ = 0
if debug == 1:
SCREAMING_SNAKE_CASE__ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
SCREAMING_SNAKE_CASE__ = 6_7
SCREAMING_SNAKE_CASE__ = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
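# Sketch of the probe-position formula both searches above share: linearly
# interpolate the item's likely index between the values at the boundaries.
def interpolation_probe_sketch(sorted_collection: list, left: int, right: int, item: int) -> int:
    return left + (item - sorted_collection[left]) * (right - left) // (
        sorted_collection[right] - sorted_collection[left]
    )

data_sketch = [10, 30, 40, 45, 50, 66, 77, 93]
assert data_sketch[interpolation_probe_sketch(data_sketch, 0, len(data_sketch) - 1, 93)] == 93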
| 9 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowercase : Optional[int] =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Path , _lowerCAmelCase: Union[str, None] = None , _lowerCAmelCase: Union[List[str], None] = None , _lowerCAmelCase: Union[str, List[str], None] = None , _lowerCAmelCase: bool = True , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =[file for file in os.listdir(_lowerCAmelCase ) if os.path.isfile(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )]
if identifier is not None:
UpperCAmelCase_ =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for n_ in n_identifier:
UpperCAmelCase_ =[file for file in files if n_ not in file]
else:
UpperCAmelCase_ =[file for file in files if n_identifier not in file]
UpperCAmelCase_ =ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , _lowerCAmelCase )
if only_modules:
UpperCAmelCase_ =file.split("." )[0]
try:
UpperCAmelCase_ =getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =doctest.DocTestSuite(_lowerCAmelCase )
UpperCAmelCase_ =unittest.TextTestRunner().run(_lowerCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCAmelCase_ =doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="modeling"
UpperCAmelCase_ =[
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase , ignore_files=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="tokenization"
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="configuration"
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ =["configuration", "modeling", "tokenization"]
self.analyze_directory(_lowerCAmelCase , n_identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =Path("docs/source" )
UpperCAmelCase_ =["favicon.ico"]
self.analyze_directory(_lowerCAmelCase , ignore_files=_lowerCAmelCase , only_modules=_lowerCAmelCase )
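# Minimal sketch of the mechanics the suite above drives: doctest examples are
# collected from this module's docstrings into a unittest suite and run by the
# text runner. The helper function is a local illustration only.
import doctest as _doctest
import unittest as _unittest


def _add_sketch(a, b):
    """Return a + b.

    >>> _add_sketch(2, 3)
    5
    """
    return a + b


_result = _unittest.TextTestRunner(verbosity=0).run(_doctest.DocTestSuite())
assert _result.wasSuccessful()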
| 54 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
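# Sketch of the deprecation-shim pattern above: subclass the replacement class,
# warn at construction, and otherwise delegate everything to the parent.
# `dict` stands in for the replacement class here.
import warnings as _warnings

class _OldNameSketch(dict):
    def __init__(self, *args, **kwargs):
        _warnings.warn("_OldNameSketch is deprecated; use dict instead.", FutureWarning)
        super().__init__(*args, **kwargs)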
| 9 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE :str = {
'gpt-neox-20b': 2048,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : str ,A : List[Any]=None ,A : str=None ,A : str=None ,A : Tuple="<|endoftext|>" ,A : Union[str, Any]="<|endoftext|>" ,A : Union[str, Any]="<|endoftext|>" ,A : Union[str, Any]=False ,**A : List[Any] ,):
super().__init__(
A ,A ,tokenizer_file=A ,unk_token=A ,bos_token=A ,eos_token=A ,add_prefix_space=A ,**A ,)
__A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,A ) != add_prefix_space:
__A = getattr(A ,pre_tok_state.pop("type" ) )
__A = add_prefix_space
__A = pre_tok_class(**A )
__A = add_prefix_space
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Optional[str] = None ):
__A = self._tokenizer.model.save(A ,name=A )
return tuple(A )
def UpperCamelCase_ ( self : Optional[int] ,A : "Conversation" ):
__A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A ,add_special_tokens=A ) + [self.eos_token_id] )
if len(A ) > self.model_max_length:
__A = input_ids[-self.model_max_length :]
return input_ids
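# Minimal sketch of the conversation flattening implemented above: encode each
# turn, append the EOS id, then truncate from the left to model_max_length.
# Token ids and the length limit below are illustrative assumptions.
eos_token_id_sketch, model_max_length_sketch = 0, 8
encoded_turns_sketch = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
input_ids_sketch = []
for turn in encoded_turns_sketch:
    input_ids_sketch.extend(turn + [eos_token_id_sketch])
if len(input_ids_sketch) > model_max_length_sketch:
    input_ids_sketch = input_ids_sketch[-model_max_length_sketch:]
assert input_ids_sketch == [4, 5, 0, 6, 7, 8, 9, 0]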
| 55 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
SCREAMING_SNAKE_CASE__ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
SCREAMING_SNAKE_CASE__ = [0, 2_5, 5_0]
SCREAMING_SNAKE_CASE__ = [2_5, 5_0, 7_5]
SCREAMING_SNAKE_CASE__ = fuzz.membership.trimf(X, abca)
SCREAMING_SNAKE_CASE__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
SCREAMING_SNAKE_CASE__ = np.ones(7_5)
SCREAMING_SNAKE_CASE__ = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
SCREAMING_SNAKE_CASE__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
SCREAMING_SNAKE_CASE__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
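# Pure-numpy sketch of the three core operations plotted above, under the
# standard max/min/complement definitions; skfuzzy is not required for these.
a_sketch = np.array([0.0, 0.5, 1.0])
b_sketch = np.array([1.0, 0.5, 0.0])
union_sketch = np.maximum(a_sketch, b_sketch)         # max(muA(x), muB(x))
intersection_sketch = np.minimum(a_sketch, b_sketch)  # min(muA(x), muB(x))
complement_sketch = 1.0 - a_sketch                    # 1 - muA(x)
assert union_sketch.tolist() == [1.0, 0.5, 1.0]
assert intersection_sketch.tolist() == [0.0, 0.5, 0.0]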
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : Any = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
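# Sketch of the lazy-import idea behind _LazyModule using a PEP 562 module-level
# __getattr__; this illustrates the pattern only and is not the actual
# transformers implementation. The attribute mapping is an assumption.
import importlib as _importlib

_LAZY_ATTRS_SKETCH = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS_SKETCH:
        return getattr(_importlib.import_module(_LAZY_ATTRS_SKETCH[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")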
| 56 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _a ( self : int , _snake_case : int , _snake_case : List[str] ):
"""simple docstring"""
A__ = object_detector(examples[0] , threshold=0.0 )
A__ = len(_snake_case )
self.assertGreater(_snake_case , 0 )
self.assertEqual(
_snake_case , [
{
'score': ANY(_snake_case ),
'label': ANY(_snake_case ),
'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )},
}
for i in range(_snake_case )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@require_torch
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
A__ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
A__ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold(self):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 9 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance, frequency, reactance) -> dict[str, float]:
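    """
    Solve for the one unknown among inductance (henries), frequency (hertz) and
    inductive reactance (ohms), given the other two, via X_L = 2 * pi * f * L.

    Illustrative doctests (values follow directly from the formula above):

    >>> ind_reactance(-35e-3, 1_000, 0)
    Traceback (most recent call last):
        ...
    ValueError: Inductance cannot be negative
    >>> ind_reactance(35e-3, 1_000, 0)
    {'reactance': 219.9114857512855}
    """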
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 57 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)
def string_to_bool(v) -> Union[bool, str]:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg( *,
    aliases = None, help = None, default = dataclasses.MISSING, default_factory = dataclasses.MISSING, metadata = None, **kwargs, ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser ( ArgumentParser ):
    """simple docstring"""

    dataclass_types : Iterable[DataClassType]

    def __init__( self, dataclass_types : Union[DataClassType, Iterable[DataClassType]], **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field( parser : ArgumentParser, field : dataclasses.Field ):
        """simple docstring"""
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )

        aliases = kwargs.pop('aliases', [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, '__origin__', field.type)
        if origin_type is Union or (hasattr(types, 'UnionType') and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]

            kwargs['type'] = make_choice_type_function(kwargs['choices'])

            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F'''--no_{field.name}''', action='store_false', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments( self, dtype : DataClassType ):
        """simple docstring"""
        if hasattr(dtype, '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '.'.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses( self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append')

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )

            return (*outputs,)
    def parse_dict( self, args : Dict[str, Any], allow_extra_keys : bool = False ):
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}''' )
        return tuple(outputs)
    def parse_json_file( self, json_file : str, allow_extra_keys : bool = False ):
        """simple docstring"""
        with open(Path(json_file), encoding='utf-8') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file( self, yaml_file : str, allow_extra_keys : bool = False ):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
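# Illustrative usage sketch (added for clarity; ``TrainArgs`` is a hypothetical
# dataclass, not part of this module):
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         learning_rate: float = 1e-4
#         do_eval: bool = False
#
#     parser = HfArgumentParser(TrainArgs)
#     (train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-5", "--do_eval"])
#     # train_args.learning_rate == 3e-5 and train_args.do_eval is True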
| 9 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
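# Import skeleton: ``_import_structure`` maps each submodule to its public names,
# and the ``_LazyModule`` installed at the bottom of this file defers the heavy
# torch / vision imports until an attribute is first accessed.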
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_poolformer'''] = ['''PoolFormerFeatureExtractor''']
    _import_structure['''image_processing_poolformer'''] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_poolformer'''] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 58 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 1_000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 21_000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 512)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)

    # id2label
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')

        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
            if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                k_new = k_new.replace(
                    f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')

            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')

        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')

        rename_keys.append((k, k_new))
    return rename_keys
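# Illustrative mapping produced by the rules above (default, non-base prefix):
#   'layer_3.1.local_rep.0.conv.weight'
#       -> 'mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight'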
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys from the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 9 | 0 |
import sys
import turtle
def get_mid(pa, pb) -> tuple[float, float]:
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth, ) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex3, vertex1), depth - 1)
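# Each call outlines one triangle and then recurses into the three corner
# triangles, so a run at depth ``d`` draws 3**d smallest triangles; depths much
# above ~7 become slow under turtle graphics.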
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 59 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
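# Example: two entries sharing local='model_doc/bert' with an identical title are
# collapsed into one above, while the same `local` appearing with *different*
# titles raises the ValueError instead.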
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 9 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """simple docstring"""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
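# e.g. accuracy(np.array([[0.2, 0.8], [0.9, 0.1]]), np.array([1, 1])) == 1
# (argmax picks classes [1, 0]; only the first prediction matches its label)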
def load_rocstories_dataset(dataset_path):
    """simple docstring"""
    with open(dataset_path, encoding='''utf_8''') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
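# Shape sketch: for a split of N (story, cont1, cont2, label) tuples this yields
# input_ids / lm_labels of shape (N, 2, input_len), mc_token_ids of shape (N, 2)
# and mc_labels of shape (N,), i.e. both candidate continuations are packed per
# example so the double-heads model can score them jointly.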
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_UpperCamelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_UpperCamelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=_UpperCamelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_UpperCamelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_UpperCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_UpperCamelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_UpperCamelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_UpperCamelCase , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_UpperCamelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_UpperCamelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_UpperCamelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=_UpperCamelCase , default=374 )
parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
snake_case_ : Tuple = load_rocstories_dataset(args.train_dataset )
snake_case_ : Dict = load_rocstories_dataset(args.eval_dataset )
snake_case_ : Optional[Any] = (train_dataset, eval_dataset)
snake_case_ : Union[str, Any] = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
snake_case_ : Dict = model.config.n_positions // 2 - 2
snake_case_ : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case_ : str = min(_UpperCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ : int = pre_process_datasets(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : str = tensor_datasets[0], tensor_datasets[1]
snake_case_ : List[str] = TensorDataset(*_UpperCamelCase )
snake_case_ : int = RandomSampler(_UpperCamelCase )
snake_case_ : Union[str, Any] = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.train_batch_size )
snake_case_ : Any = TensorDataset(*_UpperCamelCase )
snake_case_ : str = SequentialSampler(_UpperCamelCase )
snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ : Tuple = args.max_steps
snake_case_ : Union[str, Any] = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ : List[Any] = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ : Optional[Any] = list(model.named_parameters() )
snake_case_ : List[str] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
snake_case_ : str = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
snake_case_ : Optional[int] = AdamW(_UpperCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ : Optional[int] = get_linear_schedule_with_warmup(
_UpperCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCamelCase )
if args.do_train:
snake_case_ , snake_case_ , snake_case_ : List[str] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
snake_case_ : Tuple = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : Any = tqdm(_UpperCamelCase , desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
snake_case_ : Dict = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
snake_case_ : Union[str, Any] = model(_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ : List[Any] = model.module if hasattr(_UpperCamelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ : Any = os.path.join(args.output_dir , _UpperCamelCase )
snake_case_ : Optional[Any] = os.path.join(args.output_dir , _UpperCamelCase )
torch.save(model_to_save.state_dict() , _UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ : str = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
snake_case_ , snake_case_ : str = 0, 0
snake_case_ , snake_case_ : Any = 0, 0
for batch in tqdm(_UpperCamelCase , desc='''Evaluating''' ):
snake_case_ : Any = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
with torch.no_grad():
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = model(
_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : Optional[Any] = mc_logits.detach().cpu().numpy()
snake_case_ : List[Any] = mc_labels.to('''cpu''' ).numpy()
snake_case_ : Any = accuracy(_UpperCamelCase , _UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ : Any = eval_loss / nb_eval_steps
snake_case_ : Optional[int] = eval_accuracy / nb_eval_examples
snake_case_ : int = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ : Optional[Any] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
snake_case_ : List[str] = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 60 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
    """simple docstring"""

    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester :
    """simple docstring"""

    def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 1_28], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self, config, pixel_values, labels ):
        """simple docstring"""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation( self, config, pixel_values, labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation( self, config, pixel_values, labels ):
        """simple docstring"""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Optional[Any] = True
A__ : str = False
A__ : Tuple = False
A__ : Dict = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
    def test_for_binary_image_segmentation( self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
    def test_for_image_segmentation( self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
    def test_attention_outputs( self ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(_snake_case )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Dict , _snake_case : int , _snake_case : List[Any] ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
    def test_training( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> str:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_image_segmentation_ade( self ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
    def test_inference_image_segmentation_city( self ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _snake_case )
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , _snake_case )
| 9 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
    """simple docstring"""

    def __init__( self, *args, **kwargs ) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 61 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.' )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'''val_{metric}''', mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f'''val_{metric}''', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
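# Sketch of how these factories are typically wired into a PyTorch Lightning
# trainer (``output_dir`` and the metric choice are illustrative):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback(output_dir, 'rouge2'),
#             get_early_stopping_callback('rouge2', patience=3),
#         ],
#     )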
class Seq2SeqLoggingCallback ( pl.Callback ):
"""simple docstring"""
    def on_batch_end( self, trainer, pl_module ):
        """simple docstring"""
        lrs = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs( self, trainer : pl.Trainer, pl_module : pl.LightningModule, type_path : str, save_generations=True ):
        """simple docstring"""
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F'''{key}: {val:.6f}\n'''
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
def _a ( self : Dict , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _a ( self : int , _snake_case : pl.Trainer , _snake_case : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_snake_case , _snake_case , 'test' )
@rank_zero_only
def _a ( self : Optional[Any] , _snake_case : pl.Trainer , _snake_case : List[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
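# `rank_zero_only`, applied to several hooks above, runs a function only on the
# rank-0 process during distributed training. A minimal sketch of the idea,
# assuming the launcher exports a LOCAL_RANK environment variable:
def rank_zero_only_sketch(fn):
    import os
    def wrapped(*args, **kwargs):
        if int(os.environ.get('LOCAL_RANK', '0')) == 0:
            return fn(*args, **kwargs)
    return wrapped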
| 9 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=14 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Optional[Any]=None , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Tuple = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Tuple = use_mc_token_ids
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : str = self.vocab_size - 1
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : str = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Dict = CTRLForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCamelCase_ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCamelCase_ : Dict = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : List[str] = True
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : List[str] = False
def _A ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Union[str, Any] = CTRLModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 )
def _A ( self : Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : int ):
pass
@slow
def _A ( self : Any ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = CTRLModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _A ( self : Dict ):
pass
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Union[str, Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Dict = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=UpperCAmelCase_ ) # Legal the president is
SCREAMING_SNAKE_CASE : List[str] = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE : str = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase_ )
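# `generate(..., do_sample=False)` used above performs greedy decoding. A
# simplified sketch of that loop (ignoring KV caching and stopping criteria):
def greedy_decode_sketch(model, input_ids, max_new_tokens=16):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits  # (batch, seq_len, vocab_size)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids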
| 62 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
A__ = np.array(_snake_case , np.intaa )
A__ = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A__ = padding_value
normed_input_values.append(_snake_case )
else:
A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _a ( self : Tuple , _snake_case : np.ndarray , ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : List[str] , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[int] = None , **_snake_case : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
else:
A__ = None
if audio_target is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
if inputs is None:
return inputs_target
else:
A__ = inputs_target['input_values']
A__ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def _a ( self : Tuple , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = False , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = isinstance(_snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
A__ = speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [speech]
# needed to make pad() work on spectrogram inputs
A__ = self.feature_size
# convert into correct format for padding
if is_target:
A__ = [self._extract_mel_features(_snake_case ) for waveform in speech]
A__ = BatchFeature({'input_values': features} )
A__ = self.num_mel_bins
else:
A__ = BatchFeature({'input_values': speech} )
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = feature_size_hack
# convert input values to correct format
A__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
A__ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
A__ = input_values.astype(np.floataa )
# convert attention_mask to correct format
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A__ = (
attention_mask
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_snake_case , padding_value=self.padding_value )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
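# A minimal sketch of the log-mel pipeline the mel-feature method above
# delegates to `spectrogram` for: frame, window, magnitude FFT, mel filterbank,
# log10. Parameter values below are illustrative, not the checkpoint's.
def log_mel_sketch(waveform, mel_filters, frame_length=400, hop=160, n_fft=512, mel_floor=1e-10):
    frames = np.stack([waveform[i : i + frame_length] for i in range(0, len(waveform) - frame_length, hop)])
    windowed = frames * np.hanning(frame_length)
    spec = np.abs(np.fft.rfft(windowed, n=n_fft))    # magnitude spectrogram, (frames, n_fft//2 + 1)
    mel = np.maximum(spec @ mel_filters, mel_floor)  # mel filterbank, (frames, num_mel_bins)
    return np.log10(mel)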
| 9 | 0 |
import re
from filelock import FileLock
try:
import nltk
a : Optional[int] = True
except (ImportError, ModuleNotFoundError):
a : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("""<n>""" , """""" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x) )
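# Usage sketch (assumes the nltk "punkt" data was downloaded at import time):
#     >>> add_newline_to_end_of_each_sentence("Hello world. How are you?")
#     'Hello world.\nHow are you?'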
| 63 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
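# Example invocation (the script name and paths are placeholders):
#     python convert_ldm_original_checkpoint.py \
#         --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline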
| 9 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowerCamelCase ( UpperCamelCase_ ):
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: str= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , '''num_attention_heads''' ) )
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=3 , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=16 , lowerCAmelCase=[128, 256, 384] , lowerCAmelCase=[4, 6, 8] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=[16, 16, 16] , lowerCAmelCase=0 , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= parent
SCREAMING_SNAKE_CASE__: str= batch_size
SCREAMING_SNAKE_CASE__: int= image_size
SCREAMING_SNAKE_CASE__: Any= num_channels
SCREAMING_SNAKE_CASE__: Tuple= kernel_size
SCREAMING_SNAKE_CASE__: List[str]= stride
SCREAMING_SNAKE_CASE__: Tuple= padding
SCREAMING_SNAKE_CASE__: str= hidden_sizes
SCREAMING_SNAKE_CASE__: List[Any]= num_attention_heads
SCREAMING_SNAKE_CASE__: Any= depths
SCREAMING_SNAKE_CASE__: Tuple= key_dim
SCREAMING_SNAKE_CASE__: Union[str, Any]= drop_path_rate
SCREAMING_SNAKE_CASE__: Optional[Any]= patch_size
SCREAMING_SNAKE_CASE__: List[str]= attention_ratio
SCREAMING_SNAKE_CASE__: List[str]= mlp_ratio
SCREAMING_SNAKE_CASE__: Dict= initializer_range
SCREAMING_SNAKE_CASE__: str= [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE__: str= is_training
SCREAMING_SNAKE_CASE__: Optional[Any]= use_labels
SCREAMING_SNAKE_CASE__: str= num_labels
SCREAMING_SNAKE_CASE__: Optional[int]= initializer_range
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: int= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__: Optional[int]= None
if self.use_labels:
SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__: List[Any]= self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> Optional[int]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ) -> None:
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> None:
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: int= LevitModelTester(self )
SCREAMING_SNAKE_CASE__: Any= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ) -> Optional[Any]:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def UpperCamelCase_ ( self ) -> int:
pass
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: Any= model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__: Union[str, Any]= [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Tuple= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: Optional[int]= outputs.hidden_states
SCREAMING_SNAKE_CASE__: List[str]= len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE__: str= floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE__: Dict= floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: str= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__: List[str]= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ) -> Any:
pass
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: int= True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(**lowerCAmelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__: int= False
SCREAMING_SNAKE_CASE__: Dict= True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__: List[str]= model_class(lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= model(**lowerCAmelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: Optional[Any]= [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
SCREAMING_SNAKE_CASE__: str= problem_type['''title''']
SCREAMING_SNAKE_CASE__: Optional[int]= problem_type['''num_labels''']
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Union[str, Any]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__: str= inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE__: Optional[Any]= inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase ) as warning_list:
SCREAMING_SNAKE_CASE__: Any= model(**lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCamelCase_ ( self ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__: str= LevitModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def A__ ( ):
SCREAMING_SNAKE_CASE__: Tuple= Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ) -> Dict:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ) -> Any:
SCREAMING_SNAKE_CASE__: str= LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= self.default_image_processor
SCREAMING_SNAKE_CASE__: Any= prepare_img()
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__: List[str]= model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__: List[str]= torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
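# The conv output-size formula the tests above apply repeatedly, in isolation
# (the example values are illustrative):
def conv_output_size(in_size, kernel, stride, padding):
    return floor((in_size + 2 * padding - kernel) / stride) + 1
assert conv_output_size(64, kernel=3, stride=2, padding=1) == 32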
| 64 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def A ( __UpperCamelCase ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
A__ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
A__ = model.state_dict()
A__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> List[str]:
A__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
A__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
A__ = model
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
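# The key remapping above relies on both state dicts enumerating parameters in
# the same order; the core idea in isolation (toy keys are illustrative):
_src = {'old.w': 1, 'old.b': 2}
_mapping = dict(zip(_src.keys(), ['new.weight', 'new.bias']))
assert {_mapping[k]: v for k, v in _src.items()} == {'new.weight': 1, 'new.bias': 2}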
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    '''Convert lowercase letters a-z to their 1-26 positions.'''
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    '''Convert 1-26 positions back to lowercase letters a-z.'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
    main()
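# Round-trip example for the helpers above:
#     >>> encode("hello")
#     [8, 5, 12, 12, 15]
#     >>> decode([8, 5, 12, 12, 15])
#     'hello'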
| 65 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ViTFeatureExtractor"]
UpperCamelCase = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
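# A minimal sketch of the lazy-import pattern `_LazyModule` implements above:
# attribute access triggers the real submodule import, so importing the package
# itself stays cheap. (Illustrative sketch, not the library's implementation.)
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._reverse = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, name):
        # Import the owning submodule on first attribute access only.
        if name in self._reverse:
            module = importlib.import_module(f'.{self._reverse[name]}', self.__name__)
            return getattr(module, name)
        raise AttributeError(f'module {self.__name__!r} has no attribute {name!r}')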
| 66 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
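# Example: "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9.
#     >>> evaluate_postfix(["2", "1", "+", "3", "*"])
#     9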
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}"""
| 67 |
from __future__ import annotations
def make_matrix(row_size: int = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90(matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]] ) -> list[list[int]]:
    return [list(x ) for x in zip(*matrix )]
def reverse_row(matrix: list[list[int]] ) -> list[list[int]]:
    return matrix[::-1]
def reverse_column(matrix: list[list[int]] ) -> list[list[int]]:
    return [x[::-1] for x in matrix]
def print_matrix(matrix: list[list[int]] ) -> None:
    for row in matrix:
        print(*row )
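# Quick check of rotate_90 on a 2x2 matrix: the right column becomes the top row.
#     >>> rotate_90([[1, 2], [3, 4]])
#     [[2, 4], [1, 3]]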
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 9 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_input_types(self) -> None:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input(self) -> None:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_with_reset(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
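# A minimal sketch of the disjunctive-matching idea exercised above: keep every
# candidate sequence consistent with the tokens generated so far and report
# completion once one candidate is fully matched. (Sketch only, not the
# library's implementation.)
def disjunctive_progress_sketch(candidates, generated):
    live = [c for c in candidates if c[: len(generated)] == generated]
    completed = any(len(c) == len(generated) for c in live)
    return completed, live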
| 68 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
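# The classic example: 49/98 "cancels" the shared 9 to give 4/8 while keeping
# the same value, and the four such fractions multiply to 1/100:
#     >>> is_digit_cancelling(49, 98)
#     True
#     >>> solution()
#     100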
if __name__ == "__main__":
print(solution())
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[Any] = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
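# Minimal sketch of the lazy-import idea used above (added for illustration;
# the class name `_Lazy` and this simplified lookup are assumptions, not the
# real `_LazyModule` implementation): heavy submodules are imported only when
# one of their exported names is first accessed.
import importlib
import types


class _Lazy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")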
| 69 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 9 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 9 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
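# Illustrative usage sketch (added; `_ExampleTrainConfig` is a made-up
# dataclass, not part of the test suite): HfArgumentParser turns dataclass
# fields into CLI options and returns populated instances.
@dataclass
class _ExampleTrainConfig:
    lr: float = 3e-4
    epochs: int = 3


(_example_cfg,) = HfArgumentParser(_ExampleTrainConfig).parse_args_into_dataclasses(
    ["--lr", "1e-3"], look_for_args_file=False
)
assert _example_cfg.lr == 1e-3 and _example_cfg.epochs == 3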
| 71 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Lightweight stand-in so the module imports without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
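# Illustrative usage (added; requires network access to fetch the checkpoint,
# so it is left as a comment rather than executable code):
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#       question="How many cats are there?", top_k=2)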
| 9 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
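# Hedged sanity check (added, kept as a comment): with the default four-stage
# layout and embed_dim=96, the derived channel width after the last stage is
# 96 * 2**3 = 768.
#
#   config = SwinConfig()
#   assert config.hidden_size == 768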
| 72 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
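    # Added sanity check: 2**10 = 1024, so the result modulo 1000 is 24.
    assert _modexpt(2, 10, 1000) == 24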
| 9 | 0 |
class MaxFenwickTree:
    """Fenwick (binary indexed) tree storing range maxima over a mutable array."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only itself.
                self.tree[index] = value
            else:
                # Recompute the node maximum over the rest of its covered
                # range (reconstructed expression; equivalent in effect).
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
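    # Illustrative usage (added): point updates with a half-open range-max query.
    ft = MaxFenwickTree(5)
    ft.update(2, 20)
    ft.update(4, 10)
    assert ft.query(0, 5) == 20  # max over indices [0, 5)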
| 73 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')

        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_b = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_a
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_b
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
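# Illustrative invocation (added; the script file name is assumed, the URL is
# the argument's default above, and the output path is a placeholder):
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base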
| 9 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
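# Hedged usage note (added, kept as a comment): like any PretrainedConfig
# subclass, the config round-trips through its dict form.
#
#   config = ConvBertConfig(conv_kernel_size=9)
#   assert ConvBertConfig.from_dict(config.to_dict()).conv_kernel_size == 9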
| 74 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
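    # Added consistency check: an unweighted shortest path with k edges has
    # k + 1 nodes, so the two helpers must agree on "G" -> "D".
    assert len(bfs_shortest_path(demo_graph, "G", "D")) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D")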
| 9 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level() -> int:
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict() -> dict:
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as `logger.warning()`, but emits a given message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
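# Illustrative usage (added, kept as a comment; mirrors how library modules
# consume this file — the root handler and verbosity are configured lazily on
# the first call):
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("INFO-level messages are now visible")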
| 75 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
import sys
SCREAMING_SNAKE_CASE__ = 0
if debug == 1:
SCREAMING_SNAKE_CASE__ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
SCREAMING_SNAKE_CASE__ = 6_7
SCREAMING_SNAKE_CASE__ = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
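# Added note: interpolation search assumes roughly uniformly distributed keys;
# on such data the expected number of probes is O(log log n), versus O(log n)
# for binary search, degrading toward O(n) on highly skewed distributions.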
| 9 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return int((input_a, input_a).count(0 ) == 0 )
def __UpperCAmelCase ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 76 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 9 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 77 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 9 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum of a (not necessarily contiguous) subsequence."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(num, ans + num, ans)

    return ans
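# Added sanity check: for a non-contiguous subsequence the optimum is the sum
# of the positive entries, here 1 + 4 + 2 + 1 + 4 = 12.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12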
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 78 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Lightweight stand-in so the module imports without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        examples = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')

        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')

        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : int ):
"""simple docstring"""
pass
@require_torch
@slow
def _a ( self : str ):
"""simple docstring"""
A__ = 0.2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = 2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
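
# Usage sketch for the pipeline exercised by the tests above. The checkpoint
# name is assumed to be the pipeline's default zero-shot detector; printed
# scores are illustrative, not reference values.
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     predictions = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#         threshold=0.1,  # discard detections below this confidence
#     )
#     for pred in predictions:
#         print(f"{pred['label']:>8} {pred['score']:.4f} {pred['box']}")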
| 9 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
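
# Why the boundary values above work: in float32 the exponential inside these
# activations saturates at x = -100 (or -200 for mish), driving the output to
# (negative) zero, while x = -1 still yields a visibly nonzero value. A quick
# numeric check, added here as an illustrative sketch:
#
#     silu = torch.nn.SiLU()
#     for x in (-100.0, -1.0, 0.0, 20.0):
#         print(f"silu({x:>6}) = {silu(torch.tensor(x, dtype=torch.float32)).item()}")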
| 79 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
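

# Minimal usage sketch of the parser defined above. The TrainingArgs dataclass
# and its field names are made up for illustration.
#
#     from dataclasses import dataclass, field
#     from typing import Optional
#
#     @dataclass
#     class TrainingArgs:
#         # Each field becomes a --flag; the type hint drives argparse's type=.
#         learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
#         num_epochs: int = 3
#         run_name: Optional[str] = None
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#
#     # Parse an explicit argv-style list (sys.argv[1:] is used when args=None).
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "1e-4", "--fp16", "--run_name", "demo"]
#     )
#     print(training_args)
#
#     # The same dataclass can also be filled from a plain dict:
#     (from_dict,) = parser.parse_dict({"learning_rate": 5e-5, "num_epochs": 1})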
| 9 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
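

# Sketch of wiring the pieces above into a DataLoader; the tokenizer
# checkpoint and the jsonl path are placeholders.
#
#     from torch.utils.data import DataLoader
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder checkpoint
#     labels = get_mmimdb_labels()
#     dataset = JsonlDataset(
#         "data/mmimdb/train.jsonl",  # placeholder path: one JSON object per line with "text", "img", "label"
#         tokenizer,
#         get_image_transforms(),
#         labels,
#         max_seq_length=80,
#     )
#
#     # collate_fn pads token tensors to the longest sentence in the batch, builds
#     # the matching attention mask, then stacks images and multi-hot label vectors.
#     loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
#     text, mask, image, img_start, img_end, target = next(iter(loader))
#     print(text.shape, image.shape, target.shape)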
| 80 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
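
# Programmatic equivalent of the CLI above, as an illustrative sketch. Every
# path here is a placeholder and must point at a real MobileViTV2 checkpoint
# (.pt) and the yaml config it was trained with.
#
#     convert_mobilevitv2_checkpoint(
#         task_name="imagenet1k_256",
#         checkpoint_path="weights/mobilevitv2-1.0.pt",
#         orig_config_path="configs/mobilevitv2.yaml",
#         pytorch_dump_folder_path="converted/mobilevitv2-1.0-imagenet1k-256",
#     )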
| 9 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
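
# The _LazyModule indirection defers the heavy torch import until an attribute
# is actually accessed. A standalone toy version of the same idea using
# module-level __getattr__ (PEP 562); illustration only, not the transformers
# implementation:
#
#     import importlib
#
#     _IMPORT_STRUCTURE = {"configuration_informer": ["InformerConfig"]}
#     _NAME_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}
#
#     def __getattr__(name):
#         if name in _NAME_TO_MODULE:
#             module = importlib.import_module("." + _NAME_TO_MODULE[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")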
| 81 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
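
# A quick toy run of clean_model_doc_toc showing the two behaviours it
# enforces: de-duplication by "local" and case-insensitive sorting by title.
#
#     toy_doc = [
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#         {"local": "model_doc/bert", "title": "BERT"},  # duplicate entry, same title, so it is merged
#     ]
#     print(clean_model_doc_toc(toy_doc))
#     # [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]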
| 9 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase__ ( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = jnp.floataa
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = hidden_states.shape
UpperCAmelCase_ = jax.image.resize(
_UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
UpperCAmelCase_ = self.conv(_UpperCAmelCase )
return hidden_states
class lowercase__ ( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = jnp.floataa
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.conv(_UpperCAmelCase )
return hidden_states
class lowercase__ ( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = 0.0
UpperCamelCase = None
UpperCamelCase = jnp.floataa
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.in_channels if self.out_channels is None else self.out_channels
UpperCAmelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCAmelCase_ = nn.Conv(
_UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase_ = nn.Dense(_UpperCAmelCase , dtype=self.dtype )
UpperCAmelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCAmelCase_ = nn.Dropout(self.dropout_prob )
UpperCAmelCase_ = nn.Conv(
_UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
UpperCAmelCase_ = None
if use_nin_shortcut:
UpperCAmelCase_ = nn.Conv(
_UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=True ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = hidden_states
UpperCAmelCase_ = self.norma(_UpperCAmelCase )
UpperCAmelCase_ = nn.swish(_UpperCAmelCase )
UpperCAmelCase_ = self.conva(_UpperCAmelCase )
UpperCAmelCase_ = self.time_emb_proj(nn.swish(_UpperCAmelCase ) )
UpperCAmelCase_ = jnp.expand_dims(jnp.expand_dims(_UpperCAmelCase , 1 ) , 1 )
UpperCAmelCase_ = hidden_states + temb
UpperCAmelCase_ = self.norma(_UpperCAmelCase )
UpperCAmelCase_ = nn.swish(_UpperCAmelCase )
UpperCAmelCase_ = self.dropout(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = self.conva(_UpperCAmelCase )
if self.conv_shortcut is not None:
UpperCAmelCase_ = self.conv_shortcut(_UpperCAmelCase )
return hidden_states + residual
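

# A minimal shape check for the block above (class names as restored here).
# Flax convolutions expect NHWC layout, and the time embedding is projected to
# the output channel count inside the block; the sizes are arbitrary.
#
#     block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#     x = jnp.ones((1, 16, 16, 32))  # NHWC
#     temb = jnp.ones((1, 128))      # time embedding of arbitrary width
#     params = block.init(jax.random.PRNGKey(0), x, temb)
#     out = block.apply(params, x, temb)
#     print(out.shape)  # (1, 16, 16, 64): spatial size kept, channels mapped via the 1x1 shortcut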
| 82 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
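
    # A minimal end-to-end inference sketch using the same public checkpoint as
    # the integration tests above; added for orientation only.
    #
    #     processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    #     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    #     image = prepare_img()
    #     inputs = processor(images=image, return_tensors="pt")
    #     with torch.no_grad():
    #         outputs = model(**inputs)
    #     # Logits come out at 1/4 of the processed resolution; post-processing
    #     # upsamples them to the target size and takes a per-pixel argmax.
    #     target_size = image.size[::-1]  # PIL gives (W, H); the processor wants (H, W)
    #     segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[target_size])[0]
    #     print(segmentation.shape, segmentation.unique())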
| 9 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
def __init__( self : Dict , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
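

# Migration sketch: new code should construct the image processor directly.
# The checkpoint name below is given purely for illustration.
#
#     from transformers import ChineseCLIPImageProcessor
#     image_processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")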
| 83 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
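

# Sketch of wiring these callbacks into a Lightning trainer; `module` stands
# in for whatever pl.LightningModule the training script defines and is
# assumed to log a `val_rouge2` metric. The output directory is a placeholder.
#
#     callbacks = [
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback("output/checkpoints", metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ]
#     trainer = pl.Trainer(max_epochs=10, callbacks=callbacks)
#     trainer.fit(module)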
| 9 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
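

# Patch-grid sanity check: the input spectrogram is num_mel_bins x max_length
# and is cut into 16x16 patches at the configured strides. The formula below
# mirrors how the model derives its embedding grid (stated as an assumption).
#
#     config = ASTConfig()
#     f_dim = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
#     t_dim = (config.max_length - config.patch_size) // config.time_stride + 1
#     print(f_dim, t_dim, f_dim * t_dim)  # 12 101 1212 patches with the defaults above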
| 84 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
def _a ( self : Tuple , _snake_case : np.ndarray , ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : List[str] , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[int] = None , **_snake_case : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
else:
A__ = None
if audio_target is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
if inputs is None:
return inputs_target
else:
A__ = inputs_target['input_values']
A__ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
    def _process_audio(self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
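# A minimal usage sketch (illustrative only: the random waveform, the 16 kHz rate,
# and the printed shapes are assumptions, not part of the class above):
#
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.random.uniform(-1.0, 1.0, size=16000).astype(np.float32)
#     inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#     # inputs["input_values"]: padded raw waveform batch of shape (1, 16000)
#     targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#     # targets["input_values"]: log-mel targets of shape (1, num_frames, 80)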
| 9 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Principal Component Analysis: project the dataset onto `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    """Linear Discriminant Analysis: project the dataset onto `dimensions` discriminant directions."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions
        )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
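# For intuition, a minimal sketch of projecting a toy dataset with both methods
# (the random data and the seed below are illustrative assumptions):
def demo_projections() -> None:
    rng = np.random.default_rng(0)
    demo_features = rng.normal(size=(3, 40))         # 3 features x 40 samples
    demo_labels = (np.arange(40) >= 20).astype(int)  # two balanced classes
    demo_features[:, demo_labels == 1] += 2.0        # shift class 1 so the classes differ
    print(principal_component_analysis(demo_features, 2).shape)                  # (2, 40)
    print(linear_discriminant_analysis(demo_features, demo_labels, 2, 1).shape)  # (1, 40)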
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='scaled_linear', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
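    # Illustrative invocation (the script name and all paths are hypothetical):
    #   python conversion_ldm_uncond.py \
    #       --checkpoint_path ldm.ckpt \
    #       --config_path ldm_config.yaml \
    #       --output_path ./ldm-pipeline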
| 9 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of a vector."""
    return np.dot(vector, vector)
class SVC:
    """Support Vector Classifier."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF: Radial Basis Function kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """Fits the SVC with a set of observations."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
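# A minimal usage sketch (the toy data below is an illustrative assumption,
# not part of the class above):
def demo_svc() -> None:
    xs = [np.asarray([x]) for x in (-3.0, -2.0, -1.0, 1.0, 2.0, 3.0)]  # two separable classes
    ys = np.array([-1, -1, -1, 1, 1, 1])
    model = SVC(regularization=10.0, kernel="linear")
    model.fit(xs, ys)
    print(model.predict(np.asarray([-2.5])))  # expected: -1
    print(model.predict(np.asarray([2.5])))   # expected: 1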
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''', 'w') as f:
        json.dump(config, f)
def value_function():
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
# unet(128)
value_function()
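    # The zip-based key remapping above assumes both state dicts enumerate their
    # parameters in the same order. A standalone sketch of the idiom (toy dicts
    # are illustrative):
    #
    #     src = {"old.w": 1, "old.b": 2}
    #     dst = {"new.weight": None, "new.bias": None}
    #     for k, v in dict(zip(src.keys(), dst.keys())).items():
    #         src[v] = src.pop(k)
    #     # src == {"new.weight": 1, "new.bias": 2}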
| 9 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
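# The structure above defers heavy framework imports until an exported name is
# first accessed. A simplified standalone sketch of the same idea (`LazyModule`
# below is a toy stand-in, not the transformers implementation):
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._name_to_module = {n: m for m, names in import_structure.items() for n in names}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module(self._name_to_module[attr])
#             return getattr(module, attr)
#
#     sys.modules["lazy_demo"] = LazyModule("lazy_demo", {"json": ["dumps"]})
#     import lazy_demo
#     lazy_demo.dumps({"ok": True})  # "json" is imported only here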
| 87 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 9 | 0 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def _snake_case ( __snake_case : str = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
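# Illustrative: "Momo" -> counts {'m': 2, 'o': 2} -> zero odd counts -> True;
# "Mother" -> six distinct characters, each occurring once -> six odd counts -> False.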
def _snake_case ( __snake_case : str = "" ):
"""simple docstring"""
if len(__snake_case ) == 0:
return True
_lowerCamelCase : Union[str, Any] = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCamelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCamelCase : str = character_freq_dict.get(__snake_case , 0 ) + 1
_lowerCamelCase : Optional[int] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _snake_case ( __snake_case : str = "" ):
"""simple docstring"""
print("""\nFor string = """ , __snake_case , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(__snake_case ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(__snake_case ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 88 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
if not postfix_notation:
return 0
    operations = {'+', '-', '*', '/'}
    stack = []
for token in postfix_notation:
if token in operations:
            b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token))
return stack.pop()
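# Illustrative checks (note the division branch truncates toward zero, unlike plain //):
#     evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
#     evaluate_postfix(["7", "-2", "/"]) == -3   (Python's 7 // -2 would give -4)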
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
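# Illustrative: a box covering the left half of a 200x100 image maps onto the
# 0-1000 coordinate grid that LayoutLM-style models expect:
#     normalize_box([0, 0, 100, 100], 200, 100) == [0, 0, 500, 1000]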
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """Constructs a LayoutLMv3 image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}''')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """Rescale an image's pixel values by a scale factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """Normalize an image with mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        """Preprocess a batch of images, optionally running OCR to extract words and normalized boxes."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 89 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
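# Illustrative sanity check: four successive 90-degree rotations recover the input.
#     m = make_matrix(2)  # [[1, 2], [3, 4]]
#     rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m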
if __name__ == "__main__":
    matrix = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
| 9 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30_522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
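    # Illustrative: updating a Counter with [5, 5, 7] and then with [7, 9]
    # yields Counter({5: 2, 7: 2, 9: 1}); the dense list below re-indexes
    # these counts by token id.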
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 90 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'''{num}/{den}''')
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
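# Illustrative: for 2-digit fractions the four non-trivial digit-cancelling cases
# are 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() == 100.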
if __name__ == "__main__":
print(solution())
| 9 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the asdict output so the template serializes to JSON
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 91 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 9 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''apply_ocr'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='''pt''')
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_layoutlmva_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''')

        image = Image.open(ds[0]['''file''']).convert('''RGB''')

        encoding = image_processing(image, return_tensors='''pt''')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase : Tuple =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
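        expected_words = lowercase  # capture the words literal before the boxes literal below rebinds `lowercase`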
lowercase : Optional[Any] =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
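        expected_boxes = lowercase  # capture the boxes literal under a readable name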
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors='''pt''')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 92 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 9 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline using any `AutoModelForObjectDetection`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(F"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """Detect objects (bounding boxes & classes) in the image(s) passed as inputs."""
        return super().__call__(*args, **kwargs)
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_0_0_0),
                            (height * bbox[1] / 1_0_0_0),
                            (width * bbox[2] / 1_0_0_0),
                            (height * bbox[3] / 1_0_0_0),
                        ] ) )

            scores, classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
return annotation
    def _get_bounding_box( self , box ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
return bbox
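# Hedged usage sketch for the pipeline defined above (the model name, image URL,
# and output values are illustrative assumptions, not taken from this file):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# -> [{"score": 0.99, "label": "cat",
#      "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, ...]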
| 93 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
    def _a ( *args , **kwargs ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def _a ( self : List[Any] , model , tokenizer , processor ):
        """simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        examples = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
    def _a ( self : Optional[Any] , vqa_pipeline , examples ):
        """simple docstring"""
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{'score': ANY(float ), 'answer': ANY(str )}],
                [{'score': ANY(float ), 'answer': ANY(str )}],
            ] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
| 9 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
    """simple docstring"""
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_2_8
    elif "12-12" in model_name:
        config.time_stride = 1_2
        config.frequency_stride = 1_2
    elif "14-14" in model_name:
        config.time_stride = 1_4
        config.frequency_stride = 1_4
    elif "16-16" in model_name:
        config.time_stride = 1_6
        config.frequency_stride = 1_6
    else:
        raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 3_5
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 5_2_7
        filename = '''audioset-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key( name ):
    """simple docstring"""
if "module.v" in name:
lowercase : int =name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
lowercase : Tuple =name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
lowercase : int =name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
lowercase : Optional[int] =name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase : Optional[Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
lowercase : List[Any] =name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowercase : int =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase : Dict =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : List[Any] =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : List[str] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : int =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : str =name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase : Optional[int] =name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
lowercase : Any =name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
lowercase : Dict =name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
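# Illustrative trace of the renaming rules above (the input key is a made-up
# example, not taken from a real checkpoint):
# rename_key("module.v.blocks.0.attn.proj.weight")
# -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"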
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys( state_dict ):
    """simple docstring"""
    ignore_keys = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if '''speech-commands''' not in model_name else -6.845978
    std = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526
    max_length = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase : Union[str, Any] =torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase : str =torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase : Dict =torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase : List[str] =torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase : str =torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase : List[Any] =torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase : Any =torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase : Optional[Any] =torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , __A , atol=1E-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(F'MIT/{model_name}' )
        feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
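# Example invocation (a sketch; the script filename and output path are
# assumptions, not taken from this file):
# python convert_audio_spectrogram_transformer_checkpoint.py \
#     --model_name ast-finetuned-audioset-10-10-0.4593 \
#     --pytorch_dump_folder_path ./ast-base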
| 94 |
def _modexpt( base , exponent , modulo_value ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base = 1_777 , height = 1_855 , digits = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(f'{solution() = }')
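# Sanity sketch (assumption: this solves Project Euler 188, computing the last
# `digits` digits of the hyperexponentiation 1777^^1855). For plain modular
# exponentiation, _modexpt agrees with the built-in three-argument pow():
# assert _modexpt(3, 10, 10**4) == pow(3, 10, 10**4) == 9049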
| 9 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
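# Hedged usage sketch of the re-exported helpers above (the tokenizer name and
# file path are illustrative assumptions):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = TextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)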
| 95 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
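        # Toy sketch of the fused-QKV split performed above (hidden size 4 is an
        # illustrative assumption, not a real checkpoint shape):
        # qkv = torch.arange(12 * 4).reshape(12, 4).float()   # stacked [q; k; v]
        # q, k, v = qkv[:4], qkv[4:8], qkv[-4:]
        # assert torch.equal(torch.cat([q, k, v]), qkv)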
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
# labels
if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
# Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__lowerCamelCase = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping( key , file ):
    layer_rename_map = {
        """word_embeddings.weight""": """word_embeddings.weight""",
        """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
        """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
        """weight""": """ln_f.weight""",
        """bias""": """ln_f.bias""",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R""".*layer_(\d*).*""" , file )[1] )
    layer_number -= 3
    return f'h.{layer_number}.' + key
def get_dtype_size( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R"""[^\d](\d+)$""" , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
if shard_model:
__magic_name__: Union[str, Any] = os.listdir(__UpperCAmelCase )
__magic_name__: Optional[int] = sorted(filter(lambda __UpperCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__magic_name__: str = {"""weight_map""": {}, """metadata""": {}}
__magic_name__: Optional[Any] = 0
__magic_name__: Dict = None
__magic_name__: Optional[int] = BloomConfig()
for j, file in enumerate(__UpperCAmelCase ):
print("""Processing file: {}""".format(__UpperCAmelCase ) )
__magic_name__: Tuple = None
for i in range(__UpperCAmelCase ):
# load all TP files
__magic_name__: int = file.replace("""model_00""" , f'model_0{i}' )
__magic_name__: Optional[Any] = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__magic_name__: str = list(temp.keys() )
for key in keys:
__magic_name__: Any = temp.pop(__UpperCAmelCase )
if tensors is None:
__magic_name__: Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__: int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__magic_name__: int = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__: Union[str, Any] = tensors[key] / pretraining_tp
torch.save(
__UpperCAmelCase , os.path.join(
__UpperCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__magic_name__: Optional[Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__magic_name__: Union[str, Any] = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) )
__magic_name__: Tuple = BloomConfig()
__magic_name__: Optional[int] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
__magic_name__: Optional[int] = total_size
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
__magic_name__: str = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + """\n"""
f.write(__UpperCAmelCase )
else:
__magic_name__: Optional[int] = BloomModel(__UpperCAmelCase )
__magic_name__: List[str] = os.listdir(__UpperCAmelCase )
__magic_name__: Optional[Any] = sorted(filter(lambda __UpperCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__magic_name__: Any = None
for i, file in enumerate(__UpperCAmelCase ):
__magic_name__: List[Any] = None
for i in range(__UpperCAmelCase ):
# load all TP files
__magic_name__: Any = file.replace("""model_00""" , f'model_0{i}' )
__magic_name__: Dict = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__magic_name__: Optional[Any] = list(temp.keys() )
for key in keys:
__magic_name__: Optional[Any] = temp.pop(__UpperCAmelCase )
if tensors is None:
__magic_name__: Tuple = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__: Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__magic_name__: Dict = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__: Optional[int] = tensors[key] / pretraining_tp
__magic_name__: Union[str, Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__magic_name__: Optional[Any] = set(other_keys.missing_keys )
else:
__magic_name__: Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__magic_name__: int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__magic_name__: List[str] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__magic_name__: Optional[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , __UpperCAmelCase )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__lowerCamelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
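# Toy sketch (not in the original) of the two tensor-parallel merge rules used
# above, with illustrative shapes:
# import torch
# col_shards = [torch.ones(2, 3), torch.ones(2, 3)]   # column-parallel weights
# row_shards = [torch.ones(3, 2), torch.ones(3, 2)]   # row-parallel weights
# torch.cat(col_shards, dim=0).shape                  # -> (4, 3)
# torch.cat(row_shards, dim=1).shape                  # -> (3, 4)
# (torch.ones(3) + torch.ones(3)) / 2                 # layernorms are averaged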
| 96 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path( graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
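# Performance sketch (not in the original): list.pop(0) is O(n), so for large
# graphs a deque gives O(1) left pops with the same traversal order:
# from collections import deque
# queue = deque([[start]])
# path = queue.popleft()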
| 9 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
    def __init__( self : Any , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ) -> None:
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self : int ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
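# Hedged usage sketch for the Spark dataset reader above (the SparkSession
# setup and column name are illustrative assumptions):
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# dataset = lowercase__(df, cache_dir="/tmp/hf_cache").read()  # class defined above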
| 97 |
def interpolation_search( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ) -> bool:
    if collection != sorted(collection ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
SCREAMING_SNAKE_CASE__ = 0
if debug == 1:
SCREAMING_SNAKE_CASE__ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
SCREAMING_SNAKE_CASE__ = 6_7
SCREAMING_SNAKE_CASE__ = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
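# Worked probe example (hand-computed, not in the original): searching 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# i.e. index 4 (value 50); since 50 < 67 the search continues with left = 5.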
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=13 , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : int=99 , lowerCAmelCase__ : Optional[Any]=32 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : Union[str, Any]=37 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Tuple=50 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : str=None , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = use_labels
_UpperCamelCase = scope
def snake_case__ ( self : Dict ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : int , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BertGenerationEncoder(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[str] , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = BertGenerationEncoder(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
_UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , **lowerCAmelCase__ : Dict , ) -> int:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = BertGenerationDecoder(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
# first forward pass
_UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , )
_UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0]
_UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0]
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , *lowerCAmelCase__ : List[Any] , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BertGenerationDecoder(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Dict = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BertGenerationEncoderTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def snake_case__ ( self : str ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> str:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = '''bert'''
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def snake_case__ ( self : List[str] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
@slow
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_UpperCamelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
_UpperCamelCase = model(lowerCAmelCase__ )[0]
_UpperCamelCase = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_UpperCamelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
_UpperCamelCase = model(lowerCAmelCase__ )[0]
_UpperCamelCase = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 98 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
    def __init__( self : Dict , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
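# Migration sketch implied by the deprecation warning above (the checkpoint
# name is an illustrative assumption):
# from transformers import CLIPImageProcessor
# image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")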
| 9 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = inspect.getfile(accelerate.test_utils )
_lowerCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
_lowerCamelCase = ["""accelerate""", """launch"""]
_lowerCamelCase = Path.home() / """.cache/huggingface/accelerate"""
_lowerCamelCase = """default_config.yaml"""
_lowerCamelCase = config_folder / config_file
_lowerCamelCase = config_folder / """_default_config.yaml"""
_lowerCamelCase = Path("""tests/test_configs""" )
@classmethod
def snake_case_ ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def snake_case_ ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def snake_case_ ( self ):
__a = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def snake_case_ ( self ):
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
            with self.subTest(config_file=config ):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(__A ), self.test_file_path] , env=os.environ.copy() )
def snake_case_ ( self ):
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = """test-tpu"""
_lowerCamelCase = """us-central1-a"""
_lowerCamelCase = """ls"""
_lowerCamelCase = ["""accelerate""", """tpu-config"""]
_lowerCamelCase = """cd /usr/share"""
_lowerCamelCase = """tests/test_samples/test_command_file.sh"""
_lowerCamelCase = """Running gcloud compute tpus tpu-vm ssh"""
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=__A )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __A , )
def snake_case_ ( self ):
__a = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=__A , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __A , )
| 99 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 2_5, 5_0]
    abc2 = [2_5, 5_0, 7_5]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(7_5)
    zero = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
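    # A hedged sketch of the two compositions left as placeholders above, assuming
    # skfuzzy's relation helpers maxmin_composition / maxprod_composition exist as named
    # (the outer-min relations R1, R2 are illustrative, not part of the original script):
    # R1 = np.fmin.outer(young, middle_aged)  # fuzzy relation from A to B
    # R2 = np.fmin.outer(middle_aged, young)  # fuzzy relation from B back to A
    # max_min = fuzz.maxmin_composition(R1, R2)
    # max_product = fuzz.maxprod_composition(R1, R2)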
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 9 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
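# Quick illustrative check (not part of the original script): the first six
# primes are 2, 3, 5, 7, 11, 13, so solution(6) returns 13.
# assert solution(6) == 13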
| 100 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
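# Minimal usage sketch of the pipeline exercised above (image URL taken from the
# tests; with no `model` argument the library's default checkpoint is used):
# detector = pipeline("zero-shot-object-detection")
# detections = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote"]
# )
# print(detections[0]["label"], detections[0]["score"], detections[0]["box"])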
| 9 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
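# Example usage of the re-exported helpers (assuming this module is importable
# as `datasets.utils`):
# from datasets.utils import disable_progress_bar, is_progress_bar_enabled
# disable_progress_bar()
# assert not is_progress_bar_enabled()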
| 101 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
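# A minimal usage sketch (the dataclass below is illustrative, not part of this module):
#
# @dataclasses.dataclass
# class ScriptArguments:
#     learning_rate: float = HfArg(default=5e-5, help="Initial learning rate.")
#     fp16: bool = False
#
# parser = HfArgumentParser(ScriptArguments)
# (script_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16"])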
| 9 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__magic_name__ : Tuple = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def UpperCamelCase ():
UpperCamelCase : List[Any] = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE ) )
UpperCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE , """words.txt""" )
UpperCamelCase : Optional[Any] = """"""
with open(SCREAMING_SNAKE_CASE ) as f:
UpperCamelCase : Dict = f.readline()
UpperCamelCase : int = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
UpperCamelCase : Union[str, Any] = [
word
for word in [sum(ord(SCREAMING_SNAKE_CASE ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
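# Worked example: "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th
# triangular number, so "SKY" counts as a triangle word.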
| 102 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevit_v2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove keys that are not used by the HF model (e.g. seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_v2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevit_v2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevit_v2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
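# Example invocation (script name and paths are placeholders):
# python convert_mobilevitv2_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf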
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
]
] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
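# Minimal usage sketch of the question encoder tested above, using the standard
# companion tokenizer class (shown for illustration only):
# from transformers import DPRQuestionEncoderTokenizer
# tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
# embedding = model(inputs["input_ids"])[0]  # shape (1, 768)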
| 103 |
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE__ = '''docs/source/en/_toctree.yml'''
def A ( __UpperCamelCase ) -> Optional[Any]:
A__ = defaultdict(__UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() )
def A ( __UpperCamelCase=False ) -> str:
with open(__UpperCamelCase , encoding='utf-8' ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]['sections']
# Then to the model doc
A__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
A__ = api_doc[model_idx]['sections']
A__ = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if 'sections' in section]
A__ = False
for idx, modality_doc in modalities_docs:
A__ = modality_doc['sections']
A__ = clean_model_doc_toc(__UpperCamelCase )
if old_modality_doc != new_modality_doc:
A__ = True
if overwrite:
A__ = new_modality_doc
if diff:
if overwrite:
A__ = model_doc
A__ = api_doc
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 9 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
parser.add_argument(
"--student_type", type=UpperCAmelCase_, choices=["distilbert", "roberta", "gpt2"], required=UpperCAmelCase_, help="The student type (DistilBERT, RoBERTa).", )
parser.add_argument("--student_config", type=UpperCAmelCase_, required=UpperCAmelCase_, help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights", default=UpperCAmelCase_, type=UpperCAmelCase_, help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type", choices=["bert", "roberta", "gpt2"], required=UpperCAmelCase_, help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The teacher model." )
parser.add_argument("--temperature", default=2.0, type=UpperCAmelCase_, help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce", default=0.5, type=UpperCAmelCase_, help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm", default=0.0, type=UpperCAmelCase_, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
parser.add_argument("--alpha_clm", default=0.5, type=UpperCAmelCase_, help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse", default=0.0, type=UpperCAmelCase_, help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos", default=0.0, type=UpperCAmelCase_, help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop", default=0.15, type=UpperCAmelCase_, help="Proportion of tokens for which we need to make a prediction.", )
parser.add_argument("--word_mask", default=0.8, type=UpperCAmelCase_, help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep", default=0.1, type=UpperCAmelCase_, help="Proportion of tokens to keep." )
parser.add_argument("--word_rand", default=0.1, type=UpperCAmelCase_, help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing", default=0.7, type=UpperCAmelCase_, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
parser.add_argument("--token_counts", type=UpperCAmelCase_, help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
parser.add_argument(
"--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
parser.add_argument(
"--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
parser.add_argument("--n_epoch", type=UpperCAmelCase_, default=3, help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size", type=UpperCAmelCase_, default=5, help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
parser.add_argument(
"--gradient_accumulation_steps", type=UpperCAmelCase_, default=50, help="Gradient accumulation for larger training batches.", )
parser.add_argument("--warmup_prop", default=0.05, type=UpperCAmelCase_, help="Linear warmup proportion." )
parser.add_argument("--weight_decay", default=0.0, type=UpperCAmelCase_, help="Weight decay if we apply some." )
parser.add_argument("--learning_rate", default=5e-4, type=UpperCAmelCase_, help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon", default=1e-6, type=UpperCAmelCase_, help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm", default=5.0, type=UpperCAmelCase_, help="Max gradient norm." )
parser.add_argument("--initializer_range", default=0.02, type=UpperCAmelCase_, help="Random initialization range." )
parser.add_argument(
"--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
parser.add_argument(
"--fp16_opt_level", type=UpperCAmelCase_, default="O1", help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
), )
parser.add_argument("--n_gpu", type=UpperCAmelCase_, default=1, help="Number of GPUs in the node." )
parser.add_argument("--local_rank", type=UpperCAmelCase_, default=-1, help="Distributed training - Local rank" )
parser.add_argument("--seed", type=UpperCAmelCase_, default=56, help="Random seed" )
parser.add_argument("--log_interval", type=UpperCAmelCase_, default=500, help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval", type=UpperCAmelCase_, default=4000, help="Checkpoint interval." )
A__ = parser.parse_args()
sanity_checks(UpperCAmelCase_ )
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path, "parameters.json" ), "w" ) as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
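# Example invocation (paths are placeholders; the flags must satisfy sanity_checks
# above, e.g. with --mlm set, alpha_clm must be 0 and --token_counts must exist):
# python train.py --force --dump_path ./serialization_dir \
#     --data_file ./data/binarized.pickle --token_counts ./data/token_counts.pickle \
#     --student_type distilbert --student_config ./training_configs/distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 --alpha_cos 0.33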
| 104 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
def _a ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = 1
A__ = SegformerForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Optional[Any] = True
A__ : str = False
A__ : Tuple = False
A__ : Dict = False
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self , config_class=_snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
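# Worked arithmetic for the two shape checks above (e.g. assuming image_size=64, a
# first-block patch stride of 4 and sr_ratios[0]=8): expected_seq_len = (64 // 4) ** 2 = 256
# and expected_reduced_seq_len = (64 // (4 * 8)) ** 2 = 4, so a first-block attention map
# is [num_attention_heads[0], 256, 4]; for the last block, (64 // 32) ** 2 = 4 on both axes.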
A__ = len(_snake_case )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Dict , _snake_case : int , _snake_case : List[Any] ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
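# e.g. assuming image_size=64 and hidden_sizes[0]=16, the first hidden state has
# shape [16, 64 // 4, 64 // 4] = [16, 16, 16] (channels, height, width)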
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> str:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Dict ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) )
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _snake_case )
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , _snake_case )
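# Note: without `target_sizes`, post-processing keeps the logits resolution (128 x 128 here);
# passing target_sizes=[(500, 300)] resizes each predicted map to the requested (height, width).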
| 9 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase_ :
__a : CommonSchedulerState
# setable values
__a : jnp.ndarray
__a : jnp.ndarray
__a : Optional[int] = None
@classmethod
def snake_case ( cls ,snake_case__ ,snake_case__ ,snake_case__ ):
return cls(common=snake_case__ ,init_noise_sigma=snake_case__ ,timesteps=snake_case__ )
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : DDPMSchedulerState
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
__a : Tuple = [e.name for e in FlaxKarrasDiffusionSchedulers]
__a : jnp.dtype
@property
def snake_case ( self ):
return True
@register_to_config
def __init__( self ,snake_case__ = 1000 ,snake_case__ = 0.0001 ,snake_case__ = 0.02 ,snake_case__ = "linear" ,snake_case__ = None ,snake_case__ = "fixed_small" ,snake_case__ = True ,snake_case__ = "epsilon" ,snake_case__ = jnp.floataa ,):
SCREAMING_SNAKE_CASE_ : Any = dtype
def snake_case ( self ,snake_case__ = None ):
if common is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(1.0 ,dtype=self.dtype )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ ,init_noise_sigma=snake_case__ ,timesteps=snake_case__ ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ):
return sample
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = () ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE_ : Optional[int] = (jnp.arange(0 ,snake_case__ ) * step_ratio).round()[::-1]
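# e.g. num_train_timesteps=1000 with num_inference_steps=10 gives step_ratio=100 and
# timesteps = [900, 800, ..., 100, 0] (descending: the noisiest timestep comes first)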
return state.replace(
num_inference_steps=snake_case__ ,timesteps=snake_case__ ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : List[str] = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE_ : int = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE_ : Dict = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
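# i.e. the DDPM posterior variance of Eq. (7):
#   beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t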
if variance_type is None:
SCREAMING_SNAKE_CASE_ : int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.clip(snake_case__ ,a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE_ : str = jnp.log(jnp.clip(snake_case__ ,a_min=1E-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE_ : Any = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = variance
SCREAMING_SNAKE_CASE_ : Tuple = state.common.betas[t]
SCREAMING_SNAKE_CASE_ : Optional[Any] = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE_ : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : int = timestep
if key is None:
SCREAMING_SNAKE_CASE_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = jnp.split(snake_case__ ,sample.shape[1] ,axis=1 )
else:
SCREAMING_SNAKE_CASE_ : List[str] = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE_ : List[Any] = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE_ : int = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
SCREAMING_SNAKE_CASE_ : List[str] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE_ : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
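# i.e. x_0 = (x_t - sqrt(1 - alphabar_t) * eps_theta) / sqrt(alphabar_t), the forward
# process x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps solved for x_0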
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ : int = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
' or `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ : Tuple = jnp.clip(snake_case__ ,-1 ,1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE_ : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE_ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
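# i.e. mu_tilde_t = (sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t)) * x_0
#                 + (sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t)) * x_t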
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE_ : int = jax.random.split(snake_case__ ,num=1 )
SCREAMING_SNAKE_CASE_ : List[str] = jax.random.normal(snake_case__ ,shape=model_output.shape ,dtype=self.dtype )
return (self._get_variance(snake_case__ ,snake_case__ ,predicted_variance=snake_case__ ) ** 0.5) * noise
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )
SCREAMING_SNAKE_CASE_ : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ ,state=snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
return add_noise_common(state.common ,snake_case__ ,snake_case__ ,snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
return get_velocity_common(state.common ,snake_case__ ,snake_case__ ,snake_case__ )
def __len__( self ):
return self.config.num_train_timesteps
| 105 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( __UpperCamelCase ) -> Optional[int]:
A__ = filter(lambda p : p.requires_grad , model.parameters() )
A__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
if metric == "rouge2":
A__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
A__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
A__ = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
A__ = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2, bleu, em and loss; got {metric}. You can make your own by adding to this'''
' function.' )
A__ = ModelCheckpoint(
dirpath=__UpperCamelCase , filename=__UpperCamelCase , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
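# Example (hypothetical call; the factory above is named `A` in this snippet):
# A(output_dir, 'rouge2') keeps only the single best checkpoint ranked by the monitored
# `val_rouge2` metric (mode='max'), checked at most once per epoch, with filenames
# built from val_avg_rouge2 and step_count.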
def A ( __UpperCamelCase , __UpperCamelCase ) -> Any:
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=__UpperCamelCase , verbose=__UpperCamelCase , )
class __lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _a ( self : Dict , _snake_case : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_snake_case )
@rank_zero_only
def _a ( self : Union[str, Any] , _snake_case : pl.Trainer , _snake_case : pl.LightningModule , _snake_case : str , _snake_case : Optional[Any]=True ):
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / 'test_results.txt'
A__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
A__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_snake_case )
generations_file.parent.mkdir(exist_ok=_snake_case )
with open(_snake_case , 'a+' ) as writer:
for key in sorted(_snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(_snake_case , torch.Tensor ):
A__ = val.item()
A__ = F'''{key}: {val:.6f}\n'''
writer.write(_snake_case )
if not save_generations:
return
if "preds" in metrics:
A__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_snake_case )
@rank_zero_only
def _a ( self : Dict , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _a ( self : int , _snake_case : pl.Trainer , _snake_case : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_snake_case , _snake_case , 'test' )
@rank_zero_only
def _a ( self : Optional[Any] , _snake_case : pl.Trainer , _snake_case : List[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 9 | 0 |
def lowerCamelCase_ ( lowerCAmelCase__ : list[int] ) -> int:
'''simple docstring'''
if not numbers:
return 0
if not isinstance(lowerCAmelCase__ , (list, tuple) ) or not all(
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
A = A = A = numbers[0]
for i in range(1 , len(lowerCAmelCase__ ) ):
# update the maximum and minimum subarray products
A = numbers[i]
if number < 0:
A , A = min_till_now, max_till_now
A = max(lowerCAmelCase__ , max_till_now * number )
A = min(lowerCAmelCase__ , min_till_now * number )
# update the maximum product found till now
A = max(lowerCAmelCase__ , lowerCAmelCase__ )
return max_prod
| 106 |
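# Quick sanity checks for the max-product-subarray routine in the snippet above (its
# obfuscated name here is `lowerCamelCase_`); it is the classic O(n) variant that tracks
# both a running max and min product, swapping them on negative numbers:
# lowerCamelCase_([2, 3, -2, 4]) -> 6 (subarray [2, 3])
# lowerCamelCase_([-2, 0, -1]) -> 0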
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
A__ = np.array(_snake_case , np.intaa )
A__ = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A__ = padding_value
normed_input_values.append(_snake_case )
else:
A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
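# i.e. a per-example standard score x_norm = (x - mean) / sqrt(var + 1e-7), with the
# statistics taken over the unpadded region when an attention mask is given and the
# padded tail overwritten by `padding_value`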
def _a ( self : Tuple , _snake_case : np.ndarray , ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : List[str] , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[int] = None , **_snake_case : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
else:
A__ = None
if audio_target is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
if inputs is None:
return inputs_target
else:
A__ = inputs_target['input_values']
A__ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def _a ( self : Tuple , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = False , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = isinstance(_snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
A__ = speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [speech]
# needed to make pad() work on spectrogram inputs
A__ = self.feature_size
# convert into correct format for padding
if is_target:
A__ = [self._extract_mel_features(_snake_case ) for waveform in speech]
A__ = BatchFeature({'input_values': features} )
A__ = self.num_mel_bins
else:
A__ = BatchFeature({'input_values': speech} )
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = feature_size_hack
# convert input values to correct format
A__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
A__ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
A__ = input_values.astype(np.floataa )
# convert attention_mask to correct format
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A__ = (
attention_mask
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_snake_case , padding_value=self.padding_value )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 9 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( __snake_case : float ):
if edge <= 0 or not isinstance(__snake_case , (int, float) ):
raise ValueError('Length must be positive.' )
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _SCREAMING_SNAKE_CASE ( __snake_case : float ):
if edge <= 0 or not isinstance(__snake_case , (int, float) ):
raise ValueError('Length must be positive.' )
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
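# Numerical sanity check for the two dodecahedron formulas above (values rounded): for edge = 5,
# surface area = 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 ~= 516.14
# volume = (15 + 7 * sqrt(5)) / 4 * 5**3 ~= 957.89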
| 107 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = OmegaConf.load(__UpperCamelCase )
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
A__ = list(state_dict.keys() )
# extract state_dict for VQVAE
A__ = {}
A__ = 'first_stage_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
# extract state_dict for UNetLDM
A__ = {}
A__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
A__ = config.model.params.first_stage_config.params
A__ = config.model.params.unet_config.params
A__ = VQModel(**__UpperCamelCase ).eval()
vqvae.load_state_dict(__UpperCamelCase )
A__ = UNetLDMModel(**__UpperCamelCase ).eval()
unet.load_state_dict(__UpperCamelCase )
A__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__UpperCamelCase , )
A__ = LDMPipeline(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
pipeline.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
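# Example invocation (script name and paths are placeholders):
# python convert_original_ldm_to_diffusers.py \
#   --checkpoint_path last.ckpt --config_path ldm_config.yaml --output_path ./ldm-pipeline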
| 9 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = """laion/clap-htsat-unfused"""
_UpperCAmelCase = tempfile.mkdtemp()
def lowerCamelCase ( self : List[str] , **lowerCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase )
def lowerCamelCase ( self : List[Any] , **lowerCamelCase : Any ) -> Any:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = ClapProcessor(tokenizer=lowerCamelCase , feature_extractor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase )
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_UpperCAmelCase = self.get_feature_extractor(do_normalize=lowerCamelCase , padding_value=1.0 )
_UpperCAmelCase = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase )
def lowerCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ClapProcessor(tokenizer=lowerCamelCase , feature_extractor=lowerCamelCase )
_UpperCAmelCase = floats_list((3, 1000) )
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" )
_UpperCAmelCase = processor(audios=lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ClapProcessor(tokenizer=lowerCamelCase , feature_extractor=lowerCamelCase )
_UpperCAmelCase = """This is a test string"""
_UpperCAmelCase = processor(text=lowerCamelCase )
_UpperCAmelCase = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ClapProcessor(tokenizer=lowerCamelCase , feature_extractor=lowerCamelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(lowerCamelCase )
_UpperCAmelCase = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ClapProcessor(tokenizer=lowerCamelCase , feature_extractor=lowerCamelCase )
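# The processor's model_input_names is the tokenizer's names (input_ids, attention_mask)
# followed by the feature extractor's, hence the [2:] slice in the assertion below.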
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 108 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def A ( __UpperCamelCase ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
A__ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
A__ = model.state_dict()
A__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> List[str]:
A__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
A__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
A__ = model
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
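# Sketch of reloading the exported weights (assuming the same diffusers API used above;
# `UNetaDModel` is this snippet's name for UNet1DModel):
# model = UNetaDModel.from_pretrained('hub/hopper-medium-v2/unet/hor32')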
| 9 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a = logging.get_logger(__name__)
a = {"vocab_file": "spiece.model"}
a = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
a = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __a ( _snake_case ):
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : List[str]=False ,lowerCamelCase : Any=False ,lowerCamelCase : List[str]=None ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Dict=None ,lowerCamelCase : str=None ,lowerCamelCase : Optional[Dict[str, Any]] = None ,**lowerCamelCase : str ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
__SCREAMING_SNAKE_CASE = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__SCREAMING_SNAKE_CASE = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__SCREAMING_SNAKE_CASE = """<|endoftext|>""" if eos_token is None else eos_token
__SCREAMING_SNAKE_CASE = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token
__SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token
else:
__SCREAMING_SNAKE_CASE = """<pad>""" if pad_token is None else pad_token
__SCREAMING_SNAKE_CASE = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase ,remove_space=lowerCamelCase ,keep_accents=lowerCamelCase ,bos_token=lowerCamelCase ,eos_token=lowerCamelCase ,unk_token=lowerCamelCase ,pad_token=lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# Used for whitespace normalization in input texts
# fmt: off
__SCREAMING_SNAKE_CASE = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__SCREAMING_SNAKE_CASE = re.compile(
f"""[{''.join(map(lowerCamelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : Union[str, Any] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub("""""" ,lowerCamelCase )
# Normalize whitespaces
__SCREAMING_SNAKE_CASE = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFC""" ,lowerCamelCase )
return text
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,**lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.preprocess_text(lowerCamelCase )
return self.sp_model.encode(lowerCamelCase ,out_type=lowerCamelCase )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase )
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : str ):
'''simple docstring'''
return out_string
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : str ,lowerCamelCase : str ,lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase ,"""wb""" ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Union[str, List[str]] ,lowerCamelCase : Union[str, bool] = False ):
'''simple docstring'''
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = self.preprocess_text(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.sp_model.encode(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = [self.preprocess_text(lowerCamelCase ) for t in text]
__SCREAMING_SNAKE_CASE = self.sp_model.encode(lowerCamelCase )
if return_tensors is True or return_tensors == "pt":
__SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase )
return token_ids
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : "Conversation" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__SCREAMING_SNAKE_CASE = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(lowerCamelCase ) + f"""{self.bos_token}Bot:"""
)
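# With the non-7b default special tokens this yields prompts of the form:
# "<|endoftext|><s>User: hi<s>Bot: hello<s>User: thanks<s>Bot:"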
return self.encode(text=lowerCamelCase )
| 109 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase ( _snake_case ,_snake_case=False ):
try:
UpperCAmelCase__ : Optional[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase__ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase__ : Tuple = strtobool(_snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
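# e.g. running `RUN_SLOW=yes python -m pytest tests/` flips this flag to True
# (strtobool maps yes/true/1 to True and no/false/0 to False; other values
# raise the ValueError above)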
def lowerCamelCase ( _snake_case ):
return unittest.skip('Test was skipped' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(_run_slow_tests ,'test is slow' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(not torch.cuda.is_available() ,'test requires only a CPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(torch.cuda.is_available() ,'test requires a GPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_xpu_available() ,'test requires a XPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_mps_available() ,'test requires a `mps` backend support in `torch`' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() ,'test requires the Hugging Face suite' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_bnb_available() ,'test requires the bitsandbytes library' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_tpu_available() ,'test requires TPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(torch.cuda.device_count() == 1 ,'test requires a GPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(torch.xpu.device_count() == 1 ,'test requires a XPU' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(torch.cuda.device_count() > 1 ,'test requires multiple GPUs' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(torch.xpu.device_count() > 1 ,'test requires multiple XPUs' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_safetensors_available() ,'test requires safetensors' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_deepspeed_available() ,'test requires DeepSpeed' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_torch_version('>=' ,'1.12.0' ) ,'test requires torch version >= 1.12.0' )(_snake_case )
def lowerCamelCase ( _snake_case=None ,_snake_case=None ):
if test_case is None:
return partial(_snake_case ,version=_snake_case )
return unittest.skipUnless(is_torch_version('>=' ,_snake_case ) ,F'''test requires torch version >= {version}''' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_tensorboard_available() ,'test requires Tensorboard' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_wandb_available() ,'test requires wandb' )(_snake_case )
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(is_comet_ml_available() ,'test requires comet_ml' )(_snake_case )
UpperCamelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase ( _snake_case ):
return unittest.skipUnless(
_atleast_one_tracker_available ,'test requires at least one tracker to be available and for `comet_ml` to not be installed' ,)(_snake_case )
class TempDirTestCase(unittest.TestCase):
    # Whether to empty the temporary directory before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks=None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
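
# Minimal usage sketch (added for illustration; not part of the original
# module). `run_command` shells out synchronously and optionally returns
# stdout; the command below is an arbitrary example.
if __name__ == "__main__":
    out = run_command([sys.executable, "-c", "print('hello from a subprocess')"], return_stdout=True)
    print(out)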
| 110 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
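
    # Worked example (added; not in the original file): "5 6 9 * +" is
    # 5 + (6 * 9) = 59, and "/" truncates toward zero for mixed signs.
    print(evaluate_postfix(["5", "6", "9", "*", "+"]))  # 59
    print(evaluate_postfix(["-4", "3", "/"]))  # -1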
| 9 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A: Optional[Any] = logging.get_logger(__name__)
_A: Any = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
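
# Usage sketch (added for illustration; not part of the original file):
# constructing the config with defaults and reading a few attributes. The
# attribute names follow the reconstruction above.
if __name__ == "__main__":
    config = LiltConfig(channel_shrink_ratio=4)
    print(config.model_type, config.hidden_size, config.max_2d_position_embeddings)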
| 126 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
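
    # Sanity check (added; not in the original file): four successive
    # 90-degree rotations must reproduce the starting matrix.
    matrix = make_matrix()
    assert rotate_90(rotate_90(rotate_90(rotate_90(matrix)))) == matrix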
| 9 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A=1_3 , _A=6_4 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=[1, 1_6, 4, 4] , _A=None , ):
'''simple docstring'''
UpperCamelCase : Tuple = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : List[str] = use_labels
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Optional[int] = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : List[str] = intermediate_size
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : List[Any] = scope
UpperCamelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCamelCase : List[str] = (self.image_size // 3_2) ** 2
UpperCamelCase : Optional[int] = num_patches + 1
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 1_6, 3_2],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Dict = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCamelCase : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Any = self.type_sequence_label_size
UpperCamelCase : Any = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCamelCase : str = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = config_and_inputs
UpperCamelCase : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Any = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : int = False
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Tuple = False
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = ViTHybridModelTester(self )
UpperCamelCase : Dict = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=3_7 )
def _a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _a ( self ):
'''simple docstring'''
pass
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = model_class(_snake_case )
UpperCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCamelCase : int = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _a ( self ):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[int] = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
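
# Quick arithmetic check (added; not in the original test file): the backbone's
# output stride of 32 means a 64x64 input yields a (64 // 32) ** 2 = 4-patch
# feature map, so the transformer sequence length is 4 + 1 with the [CLS] token.
if __name__ == "__main__":
    image_size, stride = 64, 32
    print((image_size // stride) ** 2 + 1)  # 5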
| 102 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
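
    # Worked example (added; not in the original file): the four non-trivial
    # two-digit digit-cancelling fractions multiply to 1/100, so the call
    # above prints 100.
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']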
| 9 | 0 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
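
# Non-interactive illustration (added; not in the original module): arrow keys
# are encoded as their final escape-sequence byte plus ARROW_KEY_FLAG, which
# keeps them distinct from every printable character.
if __name__ == "__main__":
    assert KEYMAP["up"] - ARROW_KEY_FLAG == ord("A")  # ESC [ A is "cursor up"
    print("'up' is reported as key code", KEYMAP["up"])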
| 163 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
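
# Worked example (added; not in the original metric file), mirroring the usage
# docstring above: with reference points (0, 1) and (1, 0) the covariance
# matrix is singular, so the pseudo-inverse branch runs and (0, 1) lies at
# squared Mahalanobis distance 0.5 from the mean.
if __name__ == "__main__":
    print(Mahalanobis()._compute(X=[[0, 1]], reference_distribution=[[0, 1], [1, 0]]))
    # {'mahalanobis': array([0.5])}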
| 129 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 9 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
__a : int = NllbTokenizer
__a : Optional[int] = NllbTokenizerFast
__a : Optional[int] = True
__a : Tuple = True
__a : Optional[int] = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Any = NllbTokenizer(_snake_case , keep_accents=_snake_case )
UpperCamelCase_ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCamelCase_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCamelCase_ : str = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase_ : Tuple = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase_ : str = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
UpperCamelCase_ : str = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
UpperCamelCase_ : List[str] = tempfile.mkdtemp()
UpperCamelCase_ : List[Any] = tokenizer_r.save_pretrained(_snake_case )
UpperCamelCase_ : Union[str, Any] = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCamelCase_ : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
UpperCamelCase_ : Optional[int] = tokenizer_r.from_pretrained(_snake_case )
UpperCamelCase_ : Optional[int] = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=True
UpperCamelCase_ : int = tempfile.mkdtemp()
UpperCamelCase_ : Tuple = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
UpperCamelCase_ : Optional[Any] = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
UpperCamelCase_ : Optional[int] = tokenizer_r.from_pretrained(_snake_case )
UpperCamelCase_ : List[Any] = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=False
UpperCamelCase_ : Dict = tempfile.mkdtemp()
UpperCamelCase_ : List[Any] = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
UpperCamelCase_ : Any = tokenizer_p.save_pretrained(_snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase_ : str = tokenizer_r.from_pretrained(_snake_case )
UpperCamelCase_ : str = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
@require_torch
def _UpperCAmelCase ( self ):
if not self.test_seqaseq:
return
UpperCamelCase_ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
UpperCamelCase_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for"""
""" Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase_ : Dict = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
UpperCamelCase_ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=_snake_case , tgt_texts=_snake_case , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCamelCase_ : Dict = tokenizer.prepare_seqaseq_batch(
_snake_case , tgt_texts=_snake_case , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCamelCase_ : List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=_snake_case , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _snake_case )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase_ : Tuple = [AddedToken("""<special>""" , lstrip=_snake_case )]
UpperCamelCase_ : str = self.rust_tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case )
UpperCamelCase_ : Optional[int] = tokenizer_r.encode("""Hey this is a <special> token""" )
UpperCamelCase_ : List[str] = tokenizer_r.encode("""<special>""" , add_special_tokens=_snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
UpperCamelCase_ : Optional[int] = self.tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case )
UpperCamelCase_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" )
UpperCamelCase_ : Any = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
"""simple docstring"""
__a : int = "facebook/nllb-200-distilled-600M"
__a : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__a : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__a : List[Any] = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def _UpperCAmelCase ( cls ):
UpperCamelCase_ : List[Any] = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
UpperCamelCase_ : Tuple = 1
return cls
def _UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def _UpperCAmelCase ( self ):
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase_ : Tuple = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
UpperCamelCase_ : Optional[Any] = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
UpperCamelCase_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , _snake_case )
UpperCamelCase_ : List[str] = 10
UpperCamelCase_ : Optional[Any] = self.tokenizer(_snake_case , max_length=_snake_case , truncation=_snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _snake_case )
self.assertEqual(len(_snake_case ) , _snake_case )
def _UpperCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = tempfile.mkdtemp()
UpperCamelCase_ : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_snake_case )
UpperCamelCase_ : Optional[Any] = NllbTokenizer.from_pretrained(_snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _snake_case )
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCamelCase_ : Optional[int] = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCamelCase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
self.assertEqual(_snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = self.tokenizer(self.src_text , padding=_snake_case , truncation=_snake_case , max_length=3 , return_tensors="""pt""" )
UpperCamelCase_ : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=10 , return_tensors="""pt""" )
UpperCamelCase_ : Any = targets["""input_ids"""]
UpperCamelCase_ : List[str] = shift_tokens_right(
_snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Any = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_snake_case ) , {
# A, test, EOS, en_XX
"""input_ids""": [[25_60_47, 70, 73_56, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_60_57,
} , )
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = True
UpperCamelCase_ : Optional[Any] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
UpperCamelCase_ : str = False
UpperCamelCase_ : Tuple = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 208 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
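
# Usage sketch (added; not part of the original test file): the slow test above
# corresponds directly to this end-user call. Running it downloads the ViLT
# checkpoint from the Hub.
if __name__ == "__main__":
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    print(vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png", question="How many cats are there?", top_k=2))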
| 9 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
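
# Shape intuition (added; not in the original test, and it assumes flax/jax are
# installed): `shard` splits the leading batch axis across local devices, which
# is why the output above is reshaped back to (num_samples, 512, 512, 3)
# before slicing.
if __name__ == "__main__":
    example = jnp.zeros((jax.device_count() * 2, 4))
    print(shard(example).shape)  # (jax.device_count(), 2, 4)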
| 55 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns base^exponent % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    # compute the tower base^^height by right-associative repeated
    # modular exponentiation, keeping only the last `digits` digits
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
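
    # Cross-check (added; not in the original file): _modexpt agrees with
    # Python's built-in three-argument pow for modular exponentiation.
    assert _modexpt(1_777, 1_855, 10**8) == pow(1_777, 1_855, 10**8)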
| 9 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , __lowerCamelCase : Any , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = 13
lowerCAmelCase__ = 7
lowerCAmelCase__ = 30
lowerCAmelCase__ = self.seq_length + self.mem_len
lowerCAmelCase__ = 15
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 99
lowerCAmelCase__ = [10, 50, 80]
lowerCAmelCase__ = 32
lowerCAmelCase__ = 32
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1_28
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
lowerCAmelCase__ = None
lowerCAmelCase__ = 1
lowerCAmelCase__ = 0
lowerCAmelCase__ = 3
lowerCAmelCase__ = self.vocab_size - 1
lowerCAmelCase__ = 0.01
def A__ ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def A__ ( self : Tuple ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def A__ ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFTransfoXLModel(_snake_case )
lowerCAmelCase__ , lowerCAmelCase__ = model(_snake_case ).to_tuple()
lowerCAmelCase__ = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowerCAmelCase__ , lowerCAmelCase__ = model(_snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def A__ ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = TFTransfoXLLMHeadModel(_snake_case )
lowerCAmelCase__ , lowerCAmelCase__ = model(_snake_case ).to_tuple()
lowerCAmelCase__ = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowerCAmelCase__ , lowerCAmelCase__ = model(_snake_case ).to_tuple()
lowerCAmelCase__ , lowerCAmelCase__ = model([input_ids_a, mems_a] ).to_tuple()
lowerCAmelCase__ = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowerCAmelCase__ , lowerCAmelCase__ = model(_snake_case ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def A__ ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = TFTransfoXLForSequenceClassification(_snake_case )
lowerCAmelCase__ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowercase_ : Optional[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase_ : str = () if is_tf_available() else ()
lowercase_ : Optional[int] = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase_ : List[str] = False
lowercase_ : int = False
lowercase_ : Tuple = False
lowercase_ : Dict = False
def A__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : str ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFTransfoXLModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=_snake_case , d_embed=37 )
def A__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A__ ( self : Any ):
"""simple docstring"""
self.model_tester.set_seed()
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_snake_case )
def A__ ( self : Dict ):
"""simple docstring"""
self.model_tester.set_seed()
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_snake_case )
def A__ ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_snake_case )
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def A__ ( self : str ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFTransfoXLModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def A__ ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
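
# For example, for encoder layer 0 the first pair produced above is
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight"),
# i.e. (key in the original DiT checkpoint, key in the HuggingFace BEiT model).
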
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
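
# The fused `qkv.weight` popped above has shape (3 * hidden_size, hidden_size); the slices
# map its first, middle and last `hidden_size` rows to the query, key and value projections.
# Note the checkpoint stores no key bias, so only q_bias and v_bias are copied over.
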
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )

if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
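
# Example invocation (the script filename is illustrative; the default checkpoint URL
# defined above is used when --checkpoint_url is omitted):
#   python convert_dit_to_pytorch.py --pytorch_dump_folder_path ./dit-base --push_to_hub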
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}

def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
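
# Because BFS expands paths in order of length, the first path that reaches `goal`
# is guaranteed to use the fewest edges; the overall cost is O(V + E).
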
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]

if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = HfArgumentParser(__UpperCamelCase )
_UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()[0]
_UpperCamelCase : int = TensorFlowBenchmark(args=__UpperCamelCase )
try:
_UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCamelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_UpperCamelCase : Tuple = " ".join(str(__UpperCamelCase ).split(" " )[:-1] )
_UpperCamelCase : Dict = ""
_UpperCamelCase : Optional[int] = eval(str(__UpperCamelCase ).split(" " )[-1] )
_UpperCamelCase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCamelCase : List[Any] = full_error_msg + begin_error_msg + str(__UpperCamelCase )
raise ValueError(__UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
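
# Example CLI usage (flag names come from TensorFlowBenchmarkArguments; values are illustrative):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128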
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
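
# Worked example of the probe formula with collection = [10, 30, 40, 45, 50, 66, 77, 93]
# and item = 67: point = 0 + ((67 - 10) * (7 - 0)) // (93 - 10) = 399 // 83 = 4.
# sorted_collection[4] = 50 < 67, so the next iteration narrows the window to left = 5.
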
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)

def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True

if __name__ == "__main__":
import sys
SCREAMING_SNAKE_CASE__ = 0
if debug == 1:
SCREAMING_SNAKE_CASE__ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
SCREAMING_SNAKE_CASE__ = 6_7
SCREAMING_SNAKE_CASE__ = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        # Compute the log-mel spectrogram of the provided audio.
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        # Every array in the list is normalized to have zero mean and unit variance.
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
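
# Minimal usage sketch (assumes a mono float32 waveform sampled at 16 kHz is already
# loaded, e.g. with librosa or the `datasets` library; shapes shown for the defaults):
#   feature_extractor = WhisperFeatureExtractor()
#   inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#   inputs["input_features"].shape  # (1, 80, 3000) == (batch, feature_size, nb_max_frames)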
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)