def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
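# A quick sanity check of the reconstructed solution() above, using plain
# arithmetic rather than anything from the source (2**15 == 32768 and
# 3 + 2 + 7 + 6 + 8 == 26):
#
#     >>> solution(15)
#     26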
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # Intentionally empty in the original test file; the method name is a reconstruction.
        pass
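# The expected tokens above follow greedy longest-match-first WordPiece. A minimal
# sketch of that algorithm (an illustration for this document, not transformers'
# implementation; `wordpiece_tokenize` is a name introduced here):
def wordpiece_tokenize(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # try the longest remaining substring first
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word is unknown
        tokens.append(cur)
        start = end
    return tokens


# e.g. wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]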
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
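# The three helpers above all encode the same pair layout: [CLS] A [SEP] (B [SEP]).
# A self-contained illustration of how the special-tokens mask and token-type ids
# line up (plain Python; ids 101/102 are stand-in values, not RemBERT's real ids):
cls_id, sep_id = 101, 102
a, b = [7, 8, 9], [4, 5]
pair = [cls_id] + a + [sep_id] + b + [sep_id]
special_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert len(pair) == len(special_mask) == len(type_ids) == 8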
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)

            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
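# A minimal round-trip of the API exercised above (a usage sketch, not part of the
# test suite; it only needs the `transformers` package and a local directory):
#
#     from transformers import GenerationConfig
#     cfg = GenerationConfig(do_sample=True, temperature=0.7)
#     cfg.save_pretrained("/tmp/gen-cfg")
#     assert GenerationConfig.from_pretrained("/tmp/gen-cfg").temperature == 0.7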
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        """Propagate the input through both hidden layers and return the output layer."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        """Update each weight matrix by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the network's output for a single input vector."""
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output value."""
    return (value) * (1 - (value))
def example() -> int:
    """Train on all 3-bit binary vectors and predict the output for (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
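# The training targets in example() are the 3-bit parity function: each expected
# output is the XOR of the three input bits. A standalone check of that claim:
_parity_inputs = [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]
_parity_targets = [0, 1, 1, 0, 1, 0, 0, 1]
assert all(sum(bits) % 2 == t for bits, t in zip(_parity_inputs, _parity_targets))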
"""simple docstring"""
lowerCAmelCase__ : List[Any] = '''Input must be a string of 8 numbers plus letter'''
lowerCAmelCase__ : Any = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def a_ ( lowerCamelCase ):
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase__ = f'''Expected string as input, found {type(_lowercase ).__name__}'''
raise TypeError(_lowercase )
UpperCAmelCase__ = spanish_id.replace('-' , '' ).upper()
if len(_lowercase ) != 9:
raise ValueError(_lowercase )
try:
UpperCAmelCase__ = int(spanish_id_clean[0:8] )
UpperCAmelCase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowercase ) from ex
if letter.isdigit():
raise ValueError(_lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
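# A quick demonstration of the checksum rule above: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates while a wrong letter fails.
assert is_spain_national_id("12345678Z")
assert not is_spain_national_id("12345678T")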
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_,  # the encoding dict defined above
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
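# The settings asserted in test_large_tokenizer_settings encode Pegasus' id layout:
# special tokens (pad=0, eos=1, mask_1=2, mask_2=3, ...) sit below `offset`, and every
# SentencePiece id is shifted up by `offset` (103 for pegasus-large, hence
# unk_token_id == offset + 2 == 105). A toy illustration (function name invented here):
def _pegasus_style_id(sp_id: int, offset: int = 103) -> int:
    return sp_id + offset


assert _pegasus_style_id(2) == 105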
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }

            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
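# For comparison, a single-process odd-even transposition sort over the same data
# (a reference sketch written for this document, not part of the original module):
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for j in range(phase % 2, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))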
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json')
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True,
                use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'{USER}/test-dynamic-feature-extractor', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path('''data_bin''')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name='''xmod_base''',
        arch='''xmod_base''',
        task='''multilingual_masked_lm''',
        data_name_or_path=str(data_dir),
        bpe='''sentencepiece''',
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / '''sentencepiece.bpe.model'''),
        src_dict=str(data_dir / '''dict.txt'''),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]

    print('''Our X-MOD config:''', config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
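

# Usage sketch (added for illustration, not part of the upstream script): a converted
# checkpoint loads like any Transformers model. "facebook/xmod-base" is the publicly
# released converted checkpoint; substitute your own dump folder if you ran the script.
def _demo_load_converted_xmod():
    from transformers import XmodForMaskedLM

    demo_model = XmodForMaskedLM.from_pretrained("facebook/xmod-base")
    demo_model.roberta.set_default_language("en_XX")  # route activations through the English adapter
    return demo_model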
| 318 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 160 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
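

# Illustrative sketch (added, not in the original file): the config instantiates like
# any PretrainedConfig; unspecified arguments keep the defaults of the released CTRL
# model. The tiny sizes below are arbitrary demo values.
def _demo_ctrl_config():
    demo_config = CTRLConfig(n_layer=2, n_head=2, n_embd=64, dff=256)
    return demo_config.to_dict()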
| 318 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
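
# Illustrative note (added): with the lazy structure above, a statement such as
# `from transformers.onnx import OnnxConfig` only imports the `.config` submodule on
# first attribute access, which keeps `import transformers` itself cheap.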
| 331 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
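

# Usage sketch (added for illustration, not part of the original module): because this
# is a Keras layer, tokenization can run inside a TF graph / SavedModel. "gpt2" is the
# public checkpoint name; network access is required on first use.
def _demo_in_graph_tokenization():
    tokenizer_layer = TFGPT2Tokenizer.from_pretrained("gpt2")
    batch = tokenizer_layer(tf.constant(["A painting of a squirrel", "Hello"]))
    return batch["input_ids"]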
| 318 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
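

# Quick illustration of the API under test (added; requires network access to the
# Hugging Face Hub). The expected value is taken from the parametrized cases above.
def _demo_inspect_api():
    splits = get_dataset_split_names("squad", config_name="plain_text")
    assert splits == ["train", "validation"]
    return splits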
| 55 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
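

# Usage sketch (added for illustration, not part of the module): resolve the matching
# feature extractor from a checkpoint name. Requires network access; the checkpoint
# below is a public one that maps to Wav2Vec2FeatureExtractor in the table above.
def _demo_auto_feature_extractor():
    extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    return type(extractor).__name__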
| 318 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully Convolution Networks for Semantic Segmentation. This head is the implementation of
    [FCNNet](https://arxiv.org/abs/1411.4038).
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a (per-pixel) classification loss is computed
            (Cross-Entropy).

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
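

# Usage sketch (added for illustration, not part of the modeling file): run semantic
# segmentation with a released checkpoint. Requires network access; the checkpoint
# name comes from the archive list above.
def _demo_upernet_inference():
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    return logits.argmax(dim=1)  # per-pixel class ids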
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
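

# Example invocation (added for illustration; paths and model name are placeholders,
# the flags are the standard HF Trainer arguments used by this script):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output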
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 264 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # swap the targets so each frontier chases the other's newest node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
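

# Brief sanity check (added for illustration): both searches return a list of (y, x)
# coordinates from start to goal over the module-level grid defined above.
def _demo_bidirectional_bfs():
    route = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()
    assert route[0] == (0, 0) and route[-1] == (len(grid) - 1, len(grid[0]) - 1)
    return route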
| 318 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowerCamelCase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
lowerCamelCase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowercase , _lowercase )
a__ =calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ ='''rougeLsum'''
a__ =calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k]
a__ =calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k]
assert score > score_no_sep
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =['''rouge1''', '''rouge2''', '''rougeL''']
a__ =calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase )
a__ =calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase )
assert score_sep == score_no_sep
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =[
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
a__ =[
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase ) == calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =[
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
a__ =[
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
a__ =calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] , newline_sep=_lowercase )['''rougeLsum''']
a__ =calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =Path('''examples/seq2seq/test_data/wmt_en_ro''' )
a__ =calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowercase , _lowercase )
a__ =calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=_lowercase )
assert isinstance(_lowercase , _lowercase )
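# Minimal sketch of the helper these tests exercise. It assumes the signature of
# the local `utils.calculate_rouge` used above; it is not part of the test file:
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rouge2", "rougeL"])
#   print(scores["rouge2"])  # aggregated scores when bootstrap aggregation is enabled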
| 188 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Element-wise logistic sigmoid: 1 / (1 + e^(-x)).'''
    return 1 / (1 + np.exp(-vector))


# The second function's original identifier was mangled; the SiLU / swish
# activation name `sigmoid_linear_unit` is assumed.
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''Element-wise SiLU (swish): x * sigmoid(x).'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
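# Example values (not in the original file):
#   sigmoid(np.array([-1.0, 0.0, 1.0]))             -> [0.26894142, 0.5, 0.73105858]
#   sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])) -> [-0.26894142, 0.0, 0.73105858]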
| 318 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase : int = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
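# Usage sketch (standard transformers lazy-import behavior, not original to this
# file): importing from this package is cheap; torch/vision are only pulled in
# when one of the exported names is first accessed, e.g.
#   from transformers import Pix2StructConfig  # resolved through the _LazyModule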
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
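# Example invocation (a sketch built from the argparse flags defined above; the
# script filename is assumed):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub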
| 318 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
"""simple docstring"""
snake_case = "transfo-xl"
snake_case = ["mems"]
snake_case = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _SCREAMING_SNAKE_CASE=26_7735 , _SCREAMING_SNAKE_CASE=[2_0000, 4_0000, 20_0000] , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=1600 , _SCREAMING_SNAKE_CASE=1000 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="normal" , _SCREAMING_SNAKE_CASE=0.0_1 , _SCREAMING_SNAKE_CASE=0.0_1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE , )->List[str]:
'''simple docstring'''
A_ : List[Any] = vocab_size
A_ : Dict = []
self.cutoffs.extend(_SCREAMING_SNAKE_CASE )
if proj_share_all_but_first:
A_ : Any = [False] + [True] * len(self.cutoffs )
else:
A_ : Optional[Any] = [False] + [False] * len(self.cutoffs )
A_ : Optional[int] = d_model
A_ : int = d_embed
A_ : Optional[Any] = d_head
A_ : Any = d_inner
A_ : Tuple = div_val
A_ : Union[str, Any] = pre_lnorm
A_ : Any = n_layer
A_ : Tuple = n_head
A_ : List[str] = mem_len
A_ : Dict = same_length
A_ : Optional[Any] = attn_type
A_ : Tuple = clamp_len
A_ : List[Any] = sample_softmax
A_ : str = adaptive
A_ : Tuple = dropout
A_ : Dict = dropatt
A_ : Optional[int] = untie_r
A_ : int = init
A_ : List[str] = init_range
A_ : List[Any] = proj_init_std
A_ : Any = init_std
A_ : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
    def max_position_embeddings( self )->Dict:
        '''simple docstring'''
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
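# Usage sketch (assumes the upstream TransfoXLConfig API that this class mirrors):
#   config = TransfoXLConfig(d_model=1024, n_head=16, n_layer=18)
#   config.max_position_embeddings  # -> -1; Transformer-XL has no fixed length limit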
| 186 |
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
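# Intended behavior (example, not in the original file): for
# [10, 22, 9, 33, 21, 50, 41, 60] one longest non-decreasing subsequence is
# [10, 22, 33, 50, 60], so the function returns a list of length 5.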
| 318 | 0 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


# Class and method names (`SVC`, `fit`, `predict`) are assumed from the
# support-vector-machine module this snippet derives from; the original
# identifiers were mangled.
class SVC:
    """simple docstring"""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations, classes) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
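# Usage sketch (a tiny linearly separable toy problem; expected label shown):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = [1, 1, -1]
#   svc = SVC(kernel="linear", regularization=10)
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.0]))  # expected: 1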
| 98 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
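# This shim exists only for backward compatibility: constructing it emits the
# FutureWarning above and otherwise behaves exactly like OwlViTImageProcessor.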
| 318 | 0 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a = logging.get_logger(__name__)
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
UpperCAmelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
UpperCAmelCase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase : bool = field(
default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.task_name.lower()
class lowercase_ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase : Tuple = "train"
UpperCAmelCase : Any = "dev"
UpperCAmelCase : Optional[int] = "test"
class lowercase_ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase : GlueDataTrainingArguments
UpperCAmelCase : str
UpperCAmelCase : List[InputFeatures]
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : int = Split.train , _UpperCAmelCase : Any = None , ):
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _UpperCAmelCase , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
_A = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + '''.lock'''
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(_UpperCAmelCase )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , max_length=args.max_seq_length , label_list=_UpperCAmelCase , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , _UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : List[Any] , _UpperCAmelCase : Tuple ):
return self.features[i]
def lowerCAmelCase_ ( self : int ):
return self.label_list
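# Usage sketch (upstream API; the dataclass and dataset above correspond to
# GlueDataTrainingArguments and GlueDataset in transformers):
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue/MRPC", max_seq_length=128)
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")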
| 315 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''  # constant name assumed

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        '''Authorization''': F"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
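# Usage sketch (module name assumed): export a personal access token first, e.g.
#   USER_TOKEN=<your-github-token> python fetch_github_info.py
# The printed keys include fields such as "login" and "id" from the /user endpoint.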
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] = 4 ):
'''simple docstring'''
lowercase = abs(_lowercase ) or 4
return [[1 + x + y * row_size for x in range(_lowercase )] for y in range(_lowercase )]
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] ):
'''simple docstring'''
return reverse_row(transpose(_lowercase ) )
# OR.. transpose(reverse_column(matrix))
def _SCREAMING_SNAKE_CASE ( __snake_case : Tuple ):
'''simple docstring'''
return reverse_row(reverse_column(_lowercase ) )
# OR.. reverse_column(reverse_row(matrix))
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
'''simple docstring'''
return reverse_column(transpose(_lowercase ) )
# OR.. transpose(reverse_row(matrix))
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = [list(_lowercase ) for x in zip(*_lowercase )]
return matrix
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] ):
'''simple docstring'''
lowercase = matrix[::-1]
return matrix
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict ):
'''simple docstring'''
lowercase = [x[::-1] for x in matrix]
return matrix
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
'''simple docstring'''
for i in matrix:
print(*_lowercase )
if __name__ == "__main__":
_UpperCamelCase : int = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
_UpperCamelCase : str = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
_UpperCamelCase : Optional[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
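# Worked example (not in the original file): make_matrix(2) builds [[1, 2], [3, 4]];
# rotate_90 -> [[2, 4], [1, 3]], rotate_180 -> [[4, 3], [2, 1]], rotate_270 -> [[3, 1], [4, 2]].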
| 220 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
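# What these tests cover, in a sketch (real bitsandbytes/transformers API):
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#   )
# Quantized models cannot be moved or cast afterwards (`.to`, `.half`, `.float`
# raise), which is exactly what the casting tests above assert.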
| 318 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def __A ( a_ :Dict) -> np.ndarray:
__a : Any = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def __A ( a_ :Union[str, Any]) -> np.ndarray:
return (gray > 1_27) & (gray <= 2_55)
def __A ( a_ :int , a_ :Any) -> np.ndarray:
__a : List[Any] = np.zeros_like(_lowercase)
__a : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
# Copy image to padded image
__a : Tuple = image
# Iterate over image & apply kernel
for x in range(image.shape[1]):
for y in range(image.shape[0]):
__a : Tuple = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__a : str = int(summation > 0)
return output
if __name__ == "__main__":
# read original image
A = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
A = np.array(Image.open(lena_path))
# kernel to be applied
A = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
A = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
A = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
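# Morphological intuition (example, not in the original file): dilation turns a
# pixel on whenever any kernel-covered neighbor is on, so a single white pixel
# grows into the shape of the structuring element (here a 3x3 cross).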
| 160 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
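# Token layouts produced by the input-building method above:
#   single sequence:   [CLS] A [SEP]
#   pair of sequences: [CLS] A [SEP] B [SEP]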
| 318 | 0 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(F'{solution() = }')
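# Example (per the Project Euler problem statement this solves): a row of
# length 7 admits exactly 17 fillings, so solution(7) == 17.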
| 331 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
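    # A minimal usage sketch outside the test harness (checkpoint name illustrative;
    # the processor API mirrors the calls exercised above):
    #
    #     from PIL import Image
    #     from transformers import ChineseCLIPProcessor
    #
    #     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    #     inputs = processor(text="一件T恤", images=Image.open("shirt.png"), return_tensors="pt")
    #     # inputs holds input_ids / token_type_ids / attention_mask / pixel_values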
| 318 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("0.17.0" ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
return wrapper
| 55 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
def shift_tokens_right( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel( FlaxTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
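# A quick worked example of the shift above (values illustrative; the output repr may
# differ slightly across JAX versions):
#
#     >>> ids = jnp.array([[5, 6, 7, 1]])
#     >>> shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=0)
#     Array([[0, 5, 6, 7]], dtype=int32)
#
# The decoder input is the target sequence moved one step to the right with the start
# token prepended; any -100 label-padding positions become pad_token_id.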
| 318 | 0 |
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    """simple docstring"""
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
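# A quick worked example: with the centre of a 3x3 grid blocked, the only simple
# paths from (0, 0) to (2, 2) are the two corridors around the ring, so this prints 2.
if __name__ == "__main__":
    example_grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    print(depth_first_search(example_grid, 0, 0, set()))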
| 303 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
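# A hedged end-to-end sketch of the pipeline exercised above (mirrors the slow test;
# not runnable without a GPU and the BAAI/AltDiffusion weights):
#
#     pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#     pipe.to("cuda")
#     pipe.enable_attention_slicing()
#     out = pipe(prompt="A fantasy landscape, trending on artstation",
#                image=init_image, strength=0.75, guidance_scale=7.5)
#     result = out.images[0]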
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 264 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible( num: tuple ) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution( n: int = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int(''''''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'{solution() = }')
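# Sanity check from the Project Euler 43 statement: 1406357289 is a 0-to-9 pandigital
# number with the substring-divisibility property, so its digit tuple passes the predicate.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))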
| 318 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
        '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CpmAntForCausalLM''',
        '''CpmAntModel''',
        '''CpmAntPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
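# With the lazy module in place, consumers import names as if the heavy submodules were
# loaded eagerly; the actual import happens on first attribute access (sketch):
#
#     from transformers.models.cpmant import CpmAntConfig, CpmAntTokenizer
#     config = CpmAntConfig()  # only now is configuration_cpmant actually imported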
| 188 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig( PretrainedConfig ):
    model_type = "vit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
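# A short usage sketch of the two classes above (values echo the defaults):
#
#     config = ViTConfig(image_size=224, patch_size=16)
#     onnx_config = ViTOnnxConfig(config)
#     list(onnx_config.inputs)          # ['pixel_values']
#     onnx_config.atol_for_validation   # 1e-4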
| 27 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
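# A minimal sketch of the GenerationConfig workflow exercised above (checkpoint
# name illustrative):
#
#     gen_cfg = GenerationConfig.from_pretrained("gpt2")
#     unused = gen_cfg.update(max_new_tokens=64, foo="bar")  # unknown keys are returned, not set
#     assert unused == {"foo": "bar"}
#     gen_cfg.save_pretrained("./my-gen-config")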
| 318 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "megatron-bert"
    def __init__( self , vocab_size=2_9056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , )->None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
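# A short usage sketch (the defaults above roughly match the 345M-parameter
# Megatron-BERT layout: 24 layers, hidden size 1024, 16 attention heads):
#
#     config = MegatronBertConfig()                                    # full-size layout
#     tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128)  # for unit tests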
| 186 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self , input_array , output_array ):
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward(self ):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self ):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self , output , iterations , give_loss ):
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
self.back_propagation()
if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
    def predict(self , input_arr ):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value ) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative( value ) -> numpy.ndarray:
    '''simple docstring'''
    return (value) * (1 - (value))
def example() -> int:
    '''simple docstring'''
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
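# Quick numeric check of the activation pair used above: sigmoid(0) is exactly 0.5,
# and since sigmoid_derivative takes the *activation* value s and returns s * (1 - s),
# sigmoid_derivative(sigmoid(0)) is exactly 0.25.
if __name__ == "__main__":
    assert sigmoid(0) == 0.5
    assert sigmoid_derivative(sigmoid(0)) == 0.25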
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(9_7 , 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''''''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
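# Example: brute-forcing a Caesar shift with the default English frequencies (the exact
# chi-squared value depends on the text, so only shift and plaintext are shown):
#
#     shift, chi2, plaintext = decrypt_caesar_with_chi_squared(
#         "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
#     )
#     # shift == 7, plaintext == "why is the caesar cipher so popular? it is too easy to crack!"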
| 98 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self ):
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1_1_0_3 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
    def test_mask_tokens_rust_pegasus(self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens(self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings(self ):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation(self ):
        src_texts = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation(self ):
        src_texts = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self ):
        raw_input_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            token_ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318 | 0 |
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Any ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Dict ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : Tuple ):
pass
class lowercase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : str ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Dict ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 315 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> None:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
# after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ) -> list:
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def main() -> None:
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('''Sorted List\n''' )
    print(*arr )
if __name__ == "__main__":
main()
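# For comparison, the same ordering can be computed without processes; a sketch of the
# sequential odd-even transposition sort (same O(n^2) comparisons, no pipe/lock overhead):
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        start = phase % 2  # alternate between even- and odd-indexed pairs
        for j in range(start, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr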
| 318 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 220 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
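# Example invocation (illustrative only; the script filename and checkpoint path are
# placeholders, not confirmed by this file):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base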
| 318 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 160 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
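# Minimal usage sketch: `attribute_map` above routes the canonical config attribute
# names onto the CTRL-specific ones.
#   config = CTRLConfig(n_layer=2)
#   config.num_hidden_layers  # -> 2, resolved through attribute_map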
| 318 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor(self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor(self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        audio_dict = feature_extractor(audio , return_tensors='np' )
        input_processor = processor(audio=audio , return_tensors='np' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors='np' )
        input_processor = processor(images=images , return_tensors='np' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
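# The suite above can be run directly with pytest, e.g. (the path is illustrative):
#   python -m pytest tests/models/tvlt/test_processor_tvlt.py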
| 331 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer(cls , tokenizer: GPT2Tokenizer , *args , **kwargs ):
        merges = [''' '''.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config(cls , config ):
        return cls(**config )
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
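# In-graph usage sketch (downloads GPT-2 tokenizer assets; the model id is illustrative):
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   batch["input_ids"], batch["attention_mask"]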
| 318 | 0 |
'''simple docstring'''
from math import log2
def lowest_set_bit(a: int ) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
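# Worked example: 36 == 0b100100, so 36 & -36 == 4 and log2(4) == 2 — the lowest set
# bit sits at index 2.
#   assert lowest_set_bit(36) == 2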
| 55 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name ):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config(pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class , feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
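# Usage sketch (model id illustrative): the mapping above routes a checkpoint's
# `model_type` to its extractor class, e.g. a wav2vec2 config resolves to
# Wav2Vec2FeatureExtractor.
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")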
| 318 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    """Tokenize one dataset row and record its characters-per-token ratio."""
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
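# Note: `ratio_char_token` above is a characters-per-token compression proxy; e.g. a
# 1200-character file that tokenizes into 400 ids yields a ratio of 3.0.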
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self , features ):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
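# Example launch (hyperparameters illustrative; all flags come from the argument
# dataclasses and TrainingArguments above):
#   python run_swag.py --model_name_or_path bert-base-uncased --do_train --do_eval \
#       --output_dir /tmp/swag_out --pad_to_max_length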
| 318 | 0 |
"""simple docstring"""
def binomial_coefficient(n , k ):
snake_case_ : List[str] = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Dict = n - k
# Calculate C(n,k)
for i in range(_lowercase ):
result *= n - i
result //= i + 1
return result
def catalan_number(node_count ):
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial(n ):
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
snake_case_ : Dict = 1
for i in range(1 , n + 1 ):
result *= i
return result
def binary_tree_count(node_count ):
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
lowercase__ : List[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
)
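# Worked example for node_count = 3: binomial_coefficient(6, 3) = 20, so
# catalan_number(3) = 20 // 4 = 5 binary search trees, and
# binary_tree_count(3) = 5 * 3! = 30 distinct binary trees.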
| 264 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class BreadthFirstSearch:
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class BidirectionalBreadthFirstSearch:
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('''Unidirectional BFS computation time : ''', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    path2 = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
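    # Sanity sketch: BFS is optimal on unweighted grids, so both searches should return
    # shortest paths of equal length.
    #   assert path is not None and path2 is not None and len(path) == len(path2)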
| 318 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""
    order = 1
@register_to_config
    def __init__( self, num_train_timesteps = 1000, trained_betas = None ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self, num_inference_steps, device = None ):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self, model_output, timestep, sample, return_dict = True, ):
        if self.num_inference_steps is None:
            raise ValueError(
                '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self, sample, *args, **kwargs ):
        return sample
    def _get_prev_sample( self, sample, timestep_index, prev_timestep_index, ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
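# Denoising-loop sketch (the UNet call is schematic; `unet` and `sample` are
# placeholders, not defined in this file):
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)  # placeholder denoising model
#       sample = scheduler.step(model_output, t, sample).prev_sample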
| 188 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector: np.ndarray ) -> np.ndarray:
    """Element-wise SiLU / swish: x * sigmoid(x)."""
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
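# Worked example: sigmoid(0) == 0.5, so sigmoid_linear_unit(0) == 0.0, and for large
# positive x the output approaches x itself.
#   print(sigmoid(np.array([0.0])))              # [0.5]
#   print(sigmoid_linear_unit(np.array([0.0])))  # [0.]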
| 318 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name , base_model=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def convert_state_dict(orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
    convert_mobilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
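# Example invocation (hypothetical file names, added for illustration):
#   python convert_mobilevit_checkpoint.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small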
| 318 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon works around a TensorFlow issue where tf.nn.softmax can
    # misbehave under XLA compilation; it does not meaningfully change the result.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64_512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional equivalents."""

    def _expand_single_ad_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, data)
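

if __name__ == "__main__":
    # Minimal usage sketch (added; assumes this module's relative imports resolve,
    # i.e. it is run inside its package). For an eager tensor every dimension is
    # static, so `shape_list` returns plain Python ints, and `expand_1d` adds a
    # trailing axis to rank-1 tensors.
    _demo = tf.ones((2, 7, 64))
    print(shape_list(_demo))  # [2, 7, 64]
    print(shape_list(expand_1d(tf.ones((5,)))))  # [5, 1]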
| 186 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
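    # Example run (input chosen here for illustration):
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]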
| 318 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
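

# Note (added for clarity): v_rms = sqrt(3RT/M) expects the molar mass M in kg/mol.
# Nitrogen's molar mass is 0.028 kg/mol; passing 28 as in the example below yields
# ~16.35 m/s, while 0.028 would give the physically expected ~517 m/s.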
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 98 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
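# Migration note (added): replace `OwlViTFeatureExtractor.from_pretrained(...)` with
# `OwlViTImageProcessor.from_pretrained(...)`; the image processor exposes the same
# preprocessing interface, so call sites should not need further changes.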
| 318 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Tuple=18 , _UpperCAmelCase : Any=30 , _UpperCAmelCase : Optional[Any]=400 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[Any]=None , ):
_A = size if size is not None else {'''height''': 20, '''width''': 20}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = size
_A = do_normalize
_A = do_convert_rgb
_A = [512, 1_024, 2_048, 4_096]
_A = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowerCAmelCase_ ( self : int ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
_A = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Dict = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Any ):
_A = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_convert_rgb' ) )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.image_processor_tester.prepare_dummy_image()
_A = self.image_processing_class(**self.image_processor_dict )
_A = 2_048
_A = image_processor(_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
_A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
_A = '''Hello'''
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
_A = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : str ):
_A = PixaStructImageProcessingTester(self , num_channels=4 )
_A = 3
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_convert_rgb' ) )
def lowerCAmelCase_ ( self : Any ):
# Initialize image_processor
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
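# Note on the `expected_hidden_dim` computed throughout these tests: Pix2Struct
# flattens each patch into patch_height * patch_width * num_channels pixel values and
# prepends 2 entries for the patch's (row, column) position, hence the `+ 2`.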
| 315 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[floataa],
    constant_matrix: NDArray[floataa],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[floataa]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
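
    # Hedged usage sketch (system chosen here for illustration, not part of the
    # original module): a strictly diagonally dominant 3x3 system.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))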
| 220 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
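# Typical invocation for these GPU-only suites (hypothetical test path, added as a note):
#   RUN_SLOW=1 pytest tests/quantization/bnb/ -k "4bit" -x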
| 318 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowercase ( _lowercase ):
'''simple docstring'''
__lowerCAmelCase = "pegasus"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=1024 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0 , _UpperCAmelCase=False , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , **_UpperCAmelCase , ):
__a : Tuple = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : Union[str, Any] = d_model
__a : Any = encoder_ffn_dim
__a : int = encoder_layers
__a : Optional[int] = encoder_attention_heads
__a : Union[str, Any] = decoder_ffn_dim
__a : Optional[int] = decoder_layers
__a : Dict = decoder_attention_heads
__a : Optional[Any] = dropout
__a : Any = attention_dropout
__a : Dict = activation_dropout
__a : Optional[int] = activation_function
__a : Any = init_std
__a : Dict = encoder_layerdrop
__a : Union[str, Any] = decoder_layerdrop
__a : int = use_cache
__a : Dict = encoder_layers
__a : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
@property
def _lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self ):
return self.d_model
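# Usage note (added): via `attribute_map` above, generic code can read
# `config.num_attention_heads` (-> encoder_attention_heads, default 16) and
# `config.hidden_size` (-> d_model, default 1024) without Pegasus-specific branches.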
| 160 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
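# Illustration (added): `build_inputs_with_special_tokens` above yields
# `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair, while
# `create_token_type_ids_from_sequences` marks the first segment 0 and the second 1.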
| 318 | 0 |
'''simple docstring'''
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """simple docstring"""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
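
# Worked example (numbers chosen here): a fully submerged 0.002 m^3 object in water
# (rho = 1000 kg/m^3) experiences a buoyant force of 1000 * 9.80665 * 0.002 ≈ 19.61 N.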
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 331 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
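# The contract exercised above is the standard processor pattern: text routes through
# the tokenizer, images through the image processor, and `processor(text=..., images=...)`
# merges both outputs into a single batch, as the key checks in these tests verify.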
| 318 | 0 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
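    # Quick sanity check (example chosen here): 3 steps can be climbed as
    # 1+1+1, 1+2, or 2+1, so there are 3 distinct ways.
    assert climb_stairs(3) == 3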
| 55 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
| 318 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowercase_ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : str , _A : str , _A : Tuple , _A : Tuple = None , _A : Any = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : str = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__SCREAMING_SNAKE_CASE : Dict = os.path.abspath('''examples''' )
for item in os.listdir(_A ):
if item not in EXCLUDE_EXAMPLES:
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(_A , _A )
if os.path.isfile(_A ) and ".py" in item_path:
with self.subTest(
tested_script=_A , feature_script=_A , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__SCREAMING_SNAKE_CASE : Optional[Any] = compare_against_test(
os.path.join(_A , _A ) , _A , _A , _A )
__SCREAMING_SNAKE_CASE : int = '''\n'''.join(_A )
if special_strings is not None:
for string in special_strings:
__SCREAMING_SNAKE_CASE : List[str] = diff.replace(_A , '''''' )
self.assertEqual(_A , '''''' )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.one_complete_example('''complete_nlp_example.py''' , _A )
self.one_complete_example('''complete_nlp_example.py''' , _A )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__SCREAMING_SNAKE_CASE : List[Any] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _A , _A , _A )
self.one_complete_example('''complete_cv_example.py''' , _A , _A , _A )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class __UpperCamelCase ( _lowercase ):
"""simple docstring"""
lowerCAmelCase_ = False
@classmethod
def UpperCAmelCase__ ( cls : str ):
"""simple docstring"""
super().setUpClass()
__SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__SCREAMING_SNAKE_CASE : Any = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
__SCREAMING_SNAKE_CASE : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
__SCREAMING_SNAKE_CASE : str = run_command(self._launch_args + testargs , return_stdout=_A )
self.assertNotIn('''epoch 0:''' , _A )
self.assertIn('''epoch 1:''' , _A )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
__SCREAMING_SNAKE_CASE : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=_A )
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.device_count()
else:
__SCREAMING_SNAKE_CASE : List[Any] = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _A )
self.assertIn('''epoch 1:''' , _A )
else:
self.assertIn('''epoch 0:''' , _A )
self.assertIn('''epoch 1:''' , _A )
@slow
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__SCREAMING_SNAKE_CASE : Dict = run_command(self._launch_args + testargs , return_stdout=_A )
__SCREAMING_SNAKE_CASE : Any = re.findall('''({.+})''' , _A )
__SCREAMING_SNAKE_CASE : Tuple = [r for r in results if '''accuracy''' in r][-1]
__SCREAMING_SNAKE_CASE : str = ast.literal_eval(_A )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
__SCREAMING_SNAKE_CASE : int = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_A , '''tracking''' ) ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 303 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
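        # stand-in for the safety checker's feature extractor: the returned
        # callable yields an object with an (empty) ``pixel_values`` tensor and a
        # ``to()`` method, which is all the pipeline touches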
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
        # make sure here that the PNDM scheduler skips the PRK steps
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
        # make sure here that the PNDM scheduler skips the PRK steps
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so a loose absolute-error tolerance is used here
assert np.abs(expected_image - image ).max() < 1E-2
| 318 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _UpperCAmelCase :
def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : List[str]=13 , lowercase_ : Any=7 , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[int]=True , lowercase_ : Any=99 , lowercase_ : List[str]=32 , lowercase_ : Dict=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[int]=37 , lowercase_ : str="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : int=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Dict=3 , lowercase_ : Dict=4 , lowercase_ : Union[str, Any]=None , ):
snake_case_ : Any = parent
snake_case_ : int = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : List[str] = is_training
snake_case_ : Union[str, Any] = use_input_mask
snake_case_ : int = use_token_type_ids
snake_case_ : Any = use_labels
snake_case_ : Optional[int] = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : str = type_vocab_size
snake_case_ : Optional[Any] = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : str = num_labels
snake_case_ : Tuple = num_choices
snake_case_ : Optional[Any] = scope
def _snake_case ( self : Tuple ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : int = None
if self.use_input_mask:
snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = None
snake_case_ : Dict = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : List[str] ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def _snake_case ( self : List[Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
snake_case_ : str = LlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : int = model(lowercase_ , attention_mask=lowercase_ )
snake_case_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Optional[Any] , ):
snake_case_ : List[Any] = True
snake_case_ : Optional[Any] = LlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Tuple = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
snake_case_ : Optional[int] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
snake_case_ : str = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[Any] , ):
snake_case_ : Union[str, Any] = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Dict = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : str , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any , ):
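        # check that decoding with cached ``past_key_values`` matches a full
        # forward pass over the extended sequence on a random hidden-state slice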
snake_case_ : str = True
snake_case_ : List[str] = True
snake_case_ : Tuple = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
snake_case_ : Union[str, Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
snake_case_ : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
snake_case_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and next attention mask
snake_case_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : int = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
snake_case_ : Tuple = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
snake_case_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def _snake_case ( self : str ):
snake_case_ : int = self.prepare_config_and_inputs()
        snake_case_ : Any = config_and_inputs
snake_case_ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
_lowerCAmelCase : List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_lowerCAmelCase : str = (LlamaForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Optional[int] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : int = False
_lowerCAmelCase : Dict = False
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[str] = LlamaModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Any ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : str ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : Optional[Any] = type
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : int ):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[str] = 3
snake_case_ : Optional[int] = input_dict['''input_ids''']
snake_case_ : List[str] = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : Union[str, Any] = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : int ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[str] = 3
snake_case_ : List[str] = '''single_label_classification'''
snake_case_ : Dict = input_dict['''input_ids''']
snake_case_ : Any = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : int = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : int ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = 3
snake_case_ : Any = '''multi_label_classification'''
snake_case_ : Tuple = input_dict['''input_ids''']
snake_case_ : Union[str, Any] = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ : Dict = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def _snake_case ( self : Dict ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _snake_case ( self : Optional[int] , lowercase_ : List[Any] ):
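        # sanity check for RoPE scaling: a scaled model must diverge from the
        # unscaled one on long inputs, while dynamic scaling should leave short
        # inputs (within the original maximum sequence length) untouched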
snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
snake_case_ : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Dict = LlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
snake_case_ : Optional[int] = original_model(lowercase_ ).last_hidden_state
snake_case_ : Dict = original_model(lowercase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Tuple = {'''type''': scaling_type, '''factor''': 10.0}
snake_case_ : Optional[int] = LlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
snake_case_ : int = scaled_model(lowercase_ ).last_hidden_state
snake_case_ : List[Any] = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
snake_case_ : Optional[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case_ : List[str] = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case_ : int = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _snake_case ( self : Any ):
snake_case_ : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
snake_case_ : Dict = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
snake_case_ : List[Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case_ : Any = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
snake_case_ : Optional[Any] = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
snake_case_ : int = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case_ : Optional[Any] = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
snake_case_ : Tuple = model(torch.tensor(lowercase_ ) )
snake_case_ : List[str] = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
snake_case_ : Any = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def _snake_case ( self : List[str] ):
snake_case_ : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
snake_case_ : str = '''Simply put, the theory of relativity states that '''
snake_case_ : Optional[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
snake_case_ : List[str] = tokenizer.encode(lowercase_ , return_tensors='''pt''' )
snake_case_ : List[str] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=lowercase_ )
# greedy generation outputs
snake_case_ : List[str] = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
snake_case_ : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 264 |
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
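    # Project Euler problem 43 predicate: ``num`` holds the ten digits d1..d10 of
    # a 0-9 pandigital number; d2d3d4 must be divisible by 2, d3d4d5 by 3,
    # d4d5d6 by 5, and the three-digit windows starting at d5 by 7, 11, 13, 17.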
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
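# Illustrative check (an addition, not from the original file): 1406357289 is a
# known pandigital with the substring-divisibility property, since 406 % 2 == 0,
# 63 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0, 728 % 13 == 0 and
# 289 % 17 == 0, so the predicate above accepts (1, 4, 0, 6, 3, 5, 7, 2, 8, 9).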
| 318 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowerCamelCase = None
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowerCamelCase = {
'''camembert-base''': 512,
}
lowerCamelCase = '''▁'''
class __magic_name__ ( _lowercase ):
'''simple docstring'''
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : str = ["input_ids", "attention_mask"]
lowerCamelCase__ : List[str] = CamembertTokenizer
def __init__( self, lowercase_=None, lowercase_=None, lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=["<s>NOTUSED", "</s>NOTUSED"], **lowercase_, ) -> str:
"""simple docstring"""
a__ =AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else mask_token
super().__init__(
lowercase_, tokenizer_file=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, additional_special_tokens=lowercase_, **lowercase_, )
a__ =vocab_file
a__ =False if not self.vocab_file else True
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> Tuple:
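        # A CamemBERT sequence has the format ``<s> A </s>`` for a single
        # sequence and ``<s> A </s></s> B </s>`` for a pair of sequences.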
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ =[self.cls_token_id]
a__ =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> List[str]:
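        # CamemBERT, like RoBERTa, does not make use of token type ids, so a
        # list of zeros covering the whole sequence is returned.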
"""simple docstring"""
a__ =[self.sep_token_id]
a__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> Optional[Any]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a__ =os.path.join(
lowercase_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file, lowercase_ )
return (out_vocab_file,)
| 188 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __UpperCamelCase ( _lowercase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = tempfile.mkdtemp()
__a : Union[str, Any] = 5
        # REALM tokenizer vocabulary
__a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a : Dict = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(__a , exist_ok=__a )
__a : Dict = os.path.join(__a , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__a : Dict = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(__a , exist_ok=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = RealmConfig(num_block_records=self.num_block_records )
return config
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def __UpperCAmelCase ( self ):
'''simple docstring'''
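        # five short records plus one longer one, stored as an array of byte
        # strings to mirror the block-record format the retriever expects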
__a : List[str] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=__a , )
return block_records
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.get_config()
__a : Optional[Any] = self.get_dummy_retriever()
__a : Union[str, Any] = retriever.tokenizer
__a : Dict = np.array([0, 3] , dtype='long' )
__a : Any = tokenizer(['Test question'] ).input_ids
__a : List[Any] = tokenizer(
['the fourth'] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
__a : List[Any] = config.reader_seq_len
__a : Dict = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors='np' )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_config()
__a : Optional[int] = self.get_dummy_retriever()
__a : Optional[int] = retriever.tokenizer
__a : Union[str, Any] = np.array([0, 3, 5] , dtype='long' )
__a : Dict = tokenizer(['Test question'] ).input_ids
__a : List[str] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
__a : List[Any] = config.reader_seq_len
__a : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors='np' )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
__a : str = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
__a : Any = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
__a : Dict = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 27 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
            # the ad-hoc attribute survives a save/load round-trip
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase = 16
UpperCamelCase = 32
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 , SCREAMING_SNAKE_CASE = "bert-base-cased" ):
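    # download the tokenizer and the GLUE/MRPC dataset, tokenize the sentence
    # pairs, and build train/eval dataloaders; the padding strategy depends on
    # whether we are running on TPU (see ``collate_fn`` below)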
A_ : Any = AutoTokenizer.from_pretrained(_lowercase )
A_ : Any = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
A_ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A_ : Tuple = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
    # We also rename the 'label' column to 'labels', which is the name the models
    # of the transformers library expect for labels
A_ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
A_ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
A_ : Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
model.eval()
A_ : int = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ : List[str] = model(**_lowercase )
A_ : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A_ : Dict = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
A_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A_ : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
A_ : str = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : Any = config['''lr''']
A_ : str = int(config['''num_epochs'''] )
A_ : List[str] = int(config['''seed'''] )
A_ : List[str] = int(config['''batch_size'''] )
A_ : Dict = args.model_name_or_path
set_seed(_lowercase )
A_ : str = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
A_ : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A_ : Any = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
A_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
A_ : Any = 1
A_ : str = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A_ : Any = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
A_ : int = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ : int = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
A_ : Tuple = 0
    # We also need to keep track of the starting epoch so files are named properly
A_ : Dict = 0
A_ : Any = evaluate.load('''glue''' , '''mrpc''' )
A_ : int = num_epochs
if args.partial_train_epoch is not None:
A_ : Optional[int] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
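        # recover the numeric suffix of the checkpoint folder name (e.g. ``epoch_3``)
        # so that training resumes from the correct epoch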
A_ : Union[str, Any] = args.resume_from_checkpoint.split('''epoch_''' )[1]
A_ : List[str] = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A_ : Optional[Any] = int(_lowercase ) + 1
A_ : List[Any] = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
A_ : Union[str, Any] = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A_ : int = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
A_ : Dict = model(**_lowercase )
A_ : Optional[int] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A_ : Optional[Any] = f'''epoch_{epoch}'''
A_ : Union[str, Any] = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
A_ : str = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
A_ : int = accuracy
A_ : Dict = lr_scheduler.get_lr()[0]
A_ : Optional[int] = optimizer.param_groups[0]['''lr''']
A_ : List[str] = epoch
A_ : str = overall_step
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( ):
A_ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
A_ : List[Any] = parser.parse_args()
A_ : List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 186 |
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] gives the number of nodes in the input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
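        # backpropagation of the squared-error loss: each weight update is the
        # chain-rule product of the downstream error signal and the sigmoid
        # derivative at the corresponding layer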
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
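        # alternate forward passes and backpropagation for the requested number
        # of iterations; when ``give_loss`` is set, the mean squared error is
        # printed after every iteration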
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
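        # single forward pass over ``input_arr``; the network output is
        # thresholded at 0.6 to yield a binary prediction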
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
    '''Return the sigmoid activation 1 / (1 + e^-x), applied element-wise.'''
    return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
    '''Return the sigmoid derivative value * (1 - value), where value is an already-computed sigmoid output.'''
    return value * (1 - value)
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
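# A hypothetical finite-difference check (not part of the original module) of the
# identity used throughout back-propagation above: for o = sigmoid(z),
# do/dz = o * (1 - o), which is what sigmoid_derivative computes on sigmoid outputs.
def _sigmoid_gradient_check() -> None:
    value = numpy.array([-1.0, 0.0, 1.0] )
    out = 1 / (1 + numpy.exp(-value ))
    analytic = out * (1 - out)
    eps = 1e-6
    numeric = (
        1 / (1 + numpy.exp(-(value + eps) )) - 1 / (1 + numpy.exp(-(value - eps) ))
    ) / (2 * eps)
    assert numpy.allclose(analytic , numeric , atol=1e-6 )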
if __name__ == "__main__":
example()
| 318 | 0 |
"""simple docstring"""
import numpy as np
def a_ ( lowerCamelCase ):
return 1 / (1 + np.exp(-vector ))
def a_ ( lowerCamelCase ):
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
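# A self-contained sketch of the two activations above, with illustrative names
# (the snippet's own identifiers are placeholders, and the second definition of
# a_ shadows the first):
#   def sigmoid(vector):
#       return 1 / (1 + np.exp(-vector))
#   def sigmoid_linear_unit(vector):   # SiLU / swish: x * sigmoid(x)
#       return vector * sigmoid(vector)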
| 98 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
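# Hedged usage note, grounded in the asserts above: for google/pegasus-large the
# first four ids decode to the special tokens and regular pieces are shifted by
# the offset of 103:
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#   tok.convert_ids_to_tokens([0, 1, 2, 3])
#   # -> ['<pad>', '</s>', '<mask_1>', '<mask_2>']  (sentence mask, word mask)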
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = ["input_features", "is_longer"]
def __init__( self : Tuple , _UpperCAmelCase : Optional[Any]=64 , _UpperCAmelCase : List[str]=48_000 , _UpperCAmelCase : List[str]=480 , _UpperCAmelCase : Tuple=10 , _UpperCAmelCase : Any=1_024 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : str=False , _UpperCAmelCase : Tuple = 0 , _UpperCAmelCase : Dict = 14_000 , _UpperCAmelCase : str = None , _UpperCAmelCase : Optional[int] = "fusion" , _UpperCAmelCase : Any = "repeatpad" , **_UpperCAmelCase : Union[str, Any] , ):
super().__init__(
feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = top_db
_A = truncation
_A = padding
_A = fft_window_size
_A = (fft_window_size >> 1) + 1
_A = hop_length
_A = max_length_s
_A = max_length_s * sampling_rate
_A = sampling_rate
_A = frequency_min
_A = frequency_max
_A = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCAmelCase , min_frequency=_UpperCAmelCase , max_frequency=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , norm=_UpperCAmelCase , mel_scale='htk' , )
_A = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCAmelCase , min_frequency=_UpperCAmelCase , max_frequency=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , norm='slaney' , mel_scale='slaney' , )
def lowerCAmelCase_ ( self : List[str] ):
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] = None ):
_A = spectrogram(
_UpperCAmelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCAmelCase , log_mel='dB' , )
return log_mel_spectrogram.T
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ):
_A = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_A = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_A = [0]
# randomly choose index for each part
_A = np.random.choice(ranges[0] )
_A = np.random.choice(ranges[1] )
_A = np.random.choice(ranges[2] )
_A = mel[idx_front : idx_front + chunk_frames, :]
_A = mel[idx_middle : idx_middle + chunk_frames, :]
_A = mel[idx_back : idx_back + chunk_frames, :]
_A = torch.tensor(mel[None, None, :] )
_A = torch.nn.functional.interpolate(
_UpperCAmelCase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_UpperCAmelCase )
_A = mel_shrink[0][0].numpy()
_A = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_A = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_A = len(_UpperCAmelCase ) - max_length
_A = np.random.randint(0 , overflow + 1 )
_A = waveform[idx : idx + max_length]
_A = self._np_extract_fbank_features(_UpperCAmelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_A = self._np_extract_fbank_features(_UpperCAmelCase , self.mel_filters )
_A = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_A = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_A = np.stack([mel, mel, mel, mel] , axis=0 )
_A = False
else:
_A = self._random_mel_fusion(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_A = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_A = int(max_length / len(_UpperCAmelCase ) )
_A = np.stack(np.tile(_UpperCAmelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_A = int(max_length / len(_UpperCAmelCase ) )
_A = np.stack(np.tile(_UpperCAmelCase , _UpperCAmelCase ) )
_A = np.pad(_UpperCAmelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_A = self._np_extract_fbank_features(_UpperCAmelCase , self.mel_filters )
_A = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_A = self._np_extract_fbank_features(_UpperCAmelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] = None , _UpperCAmelCase : int = None , _UpperCAmelCase : List[Any] = None , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Optional[Any] = None , **_UpperCAmelCase : Tuple , ):
_A = truncation if truncation is not None else self.truncation
_A = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray(_UpperCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_A = [
self._get_input_mel(_UpperCAmelCase , max_length if max_length else self.nb_max_samples , _UpperCAmelCase , _UpperCAmelCase )
for waveform in raw_speech
]
_A = []
_A = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCAmelCase )
is_longer.append(_UpperCAmelCase )
if truncation == "fusion" and sum(_UpperCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_A = np.random.randint(0 , len(_UpperCAmelCase ) )
_A = True
if isinstance(input_mel[0] , _UpperCAmelCase ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_A = [[longer] for longer in is_longer]
_A = {'''input_features''': input_mel, '''is_longer''': is_longer}
_A = BatchFeature(_UpperCAmelCase )
if return_tensors is not None:
_A = input_features.convert_to_tensors(_UpperCAmelCase )
return input_features
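# A minimal standalone sketch of the "repeatpad" strategy used in _get_input_mel
# above (names are illustrative, not part of the extractor's API; the waveform is
# assumed to be shorter than max_length):
def _repeatpad_sketch(waveform , max_length ):
    n_repeat = int(max_length / len(waveform ) )
    tiled = np.tile(waveform , n_repeat )
    return np.pad(tiled , (0, max_length - tiled.shape[0]) , mode='constant' , constant_values=0 )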
| 315 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (n is hardcoded to 10 here, matching the length of the example list in main())
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
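# For comparison, a sequential sketch of the same odd-even transposition idea
# (added for illustration, not part of the original parallel module): each pass
# compares alternating adjacent pairs, and n passes suffice for n elements.
def odd_even_transposition_sequential(arr ) -> list:
    for i in range(0 , len(arr ) ):
        for j in range(i % 2 , len(arr ) - 1 , 2 ):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr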
if __name__ == "__main__":
main()
| 318 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase_ ( self ):
lowercase = 1
lowercase = 3
lowercase = (3_2, 3_2)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCamelCase )
return image
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(_lowerCamelCase )
@property
def UpperCamelCase_ ( self ):
def extract(*_lowerCamelCase , **_lowerCamelCase ):
class a :
def __init__( self ):
lowercase = torch.ones([0] )
def UpperCamelCase_ ( self , _lowerCamelCase ):
self.pixel_values.to(_lowerCamelCase )
return self
return Out()
return extract
def UpperCamelCase_ ( self ):
lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=_lowerCamelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase = 7_7
lowercase = self.dummy_image.to(_lowerCamelCase )
lowercase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=_lowerCamelCase , scheduler=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=self.dummy_extractor , )
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCamelCase )
lowercase = alt_pipe.to(_lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCamelCase , )
lowercase = output.images
lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase_ ( self ):
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=_lowerCamelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase = 7_7
lowercase = self.dummy_image.to(_lowerCamelCase )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=_lowerCamelCase , scheduler=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=self.dummy_extractor , )
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCamelCase )
lowercase = alt_pipe.to(_lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='np' , image=_lowerCamelCase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase_ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase = init_image.resize((7_6_0, 5_0_4) )
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCamelCase , safety_checker=_lowerCamelCase , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_lowerCamelCase , output_type='np' , )
lowercase = output.images[0]
lowercase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowercase = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase = init_image.resize((7_6_8, 5_1_2) )
lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCamelCase , safety_checker=_lowerCamelCase , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_lowerCamelCase , output_type='np' , )
lowercase = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so compare against a loose
        # maximum-absolute-difference tolerance here
assert np.abs(expected_image - image ).max() < 1e-2
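# Hedged usage sketch mirroring the slow test above (downloads the
# BAAI/AltDiffusion checkpoint; parameter values echo the test):
#   pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion")
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5,
#              generator=torch.manual_seed(0), output_type="np")
#   image = out.images[0]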
| 220 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
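# Example invocation (the script name is illustrative; the flags match the
# argparse definitions above, and the conversion expects sentencepiece.bpe.model
# to sit next to the checkpoint file):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head   # optional: also convert the MNLI head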
| 318 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''spiece.model'''}
A = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __lowercase ( _lowercase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<sep>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<cls>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<eop>", "<eod>"] , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__a : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__a : str = 3
__a : Optional[int] = do_lower_case
__a : Optional[int] = remove_space
__a : Dict = keep_accents
__a : List[Any] = vocab_file
__a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
__a : Any = jieba
__a : str = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCamelCase ( self ):
return len(self.sp_model )
def _lowerCamelCase ( self ):
__a : Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__a : int = self.__dict__.copy()
__a : Optional[Any] = None
return state
def __setstate__( self , _UpperCAmelCase ):
__a : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a : List[str] = {}
__a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.remove_space:
__a : List[str] = ''' '''.join(inputs.strip().split() )
else:
__a : Optional[Any] = inputs
__a : Optional[int] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__a : Optional[Any] = unicodedata.normalize('''NFKD''' , _UpperCAmelCase )
__a : str = ''''''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
__a : Any = outputs.lower()
return outputs
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[Any] = self.preprocess_text(_UpperCAmelCase )
__a : Tuple = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
__a : str = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__a : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__a : Any = cur_pieces[1:]
else:
__a : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.sp_model.PieceToId(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.sp_model.IdToPiece(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Any = ''''''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ''' ''' ).strip()
return out_string
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Union[str, Any] = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : str = [self.sep_token_id]
__a : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Dict = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__a : Optional[Any] = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
__a : Dict = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
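# A small round-trip sketch of the whitespace trick used above: spaces and
# newlines are swapped for U+2582/U+2583 before SentencePiece sees the text,
# and _decode maps them back (after stripping SentencePiece's own spaces).
_whitespace_translator = str.maketrans(''' \n''' , '''\u2582\u2583''' )
assert '''hello world\n'''.translate(_whitespace_translator ) == '''hello\u2582world\u2583'''
assert '''hello\u2582world\u2583'''.replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' ) == '''hello world\n'''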
| 160 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
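# Hedged usage sketch: via the attribute_map above, aliased names resolve to the
# canonical ones (the upstream class name CTRLConfig is assumed here, since this
# snippet's identifiers are placeholders):
#   config = CTRLConfig(n_embd=1280)
#   assert config.hidden_size == config.n_embd == 1280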
| 318 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase :List[Any] = logging.get_logger(__name__)
lowerCAmelCase :Union[str, Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
'''simple docstring'''
A_ : Dict = "instructblip_vision_model"
def __init__( self : Union[str, Any] , _A : str=1408 , _A : List[Any]=6144 , _A : int=39 , _A : Union[str, Any]=16 , _A : Optional[Any]=224 , _A : Any=14 , _A : Optional[int]="gelu" , _A : Dict=1E-6 , _A : Optional[int]=0.0 , _A : Dict=1E-10 , _A : Dict=True , **_A : Tuple , ) -> Dict:
super().__init__(**_A )
__magic_name__ : str = hidden_size
__magic_name__ : Dict = intermediate_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : Any = patch_size
__magic_name__ : Optional[Any] = image_size
__magic_name__ : Tuple = initializer_range
__magic_name__ : List[Any] = attention_dropout
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : int = hidden_act
__magic_name__ : Dict = qkv_bias
@classmethod
def __lowerCAmelCase ( cls : List[Any] , _A : Optional[int] , **_A : str ) -> Optional[int]:
cls._set_token_in_kwargs(_A )
__magic_name__ : Optional[Any] = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( _lowercase ):
'''simple docstring'''
A_ : Optional[int] = "instructblip_qformer"
def __init__( self : str , _A : List[Any]=30522 , _A : Union[str, Any]=768 , _A : Optional[int]=12 , _A : int=12 , _A : Union[str, Any]=3072 , _A : List[str]="gelu" , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : List[Any]=512 , _A : int=0.02 , _A : Optional[Any]=1E-12 , _A : Dict=0 , _A : Union[str, Any]="absolute" , _A : int=2 , _A : List[Any]=1408 , **_A : Dict , ) -> int:
super().__init__(pad_token_id=_A , **_A )
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : str = num_hidden_layers
__magic_name__ : Tuple = num_attention_heads
__magic_name__ : str = hidden_act
__magic_name__ : int = intermediate_size
__magic_name__ : Optional[int] = hidden_dropout_prob
__magic_name__ : Optional[int] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = max_position_embeddings
__magic_name__ : str = initializer_range
__magic_name__ : Dict = layer_norm_eps
__magic_name__ : Optional[int] = position_embedding_type
__magic_name__ : int = cross_attention_frequency
__magic_name__ : Union[str, Any] = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls : int , _A : Optional[Any] , **_A : Union[str, Any] ) -> Dict:
cls._set_token_in_kwargs(_A )
__magic_name__ : List[str] = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : List[str] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( _lowercase ):
'''simple docstring'''
A_ : List[str] = "instructblip"
A_ : int = True
def __init__( self : Optional[int] , _A : Optional[Any]=None , _A : int=None , _A : str=None , _A : Optional[int]=32 , **_A : Dict ) -> Union[str, Any]:
super().__init__(**_A )
if vision_config is None:
__magic_name__ : Optional[Any] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__magic_name__ : Optional[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__magic_name__ : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__magic_name__ : str = InstructBlipVisionConfig(**_A )
__magic_name__ : Tuple = InstructBlipQFormerConfig(**_A )
__magic_name__ : List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__magic_name__ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**_A )
__magic_name__ : int = self.text_config.tie_word_embeddings
__magic_name__ : str = self.text_config.is_encoder_decoder
__magic_name__ : List[Any] = num_query_tokens
__magic_name__ : List[Any] = self.vision_config.hidden_size
__magic_name__ : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__magic_name__ : Union[str, Any] = 1.0
__magic_name__ : Any = 0.02
@classmethod
def __lowerCAmelCase ( cls : Tuple , _A : List[str] , _A : int , _A : List[str] , **_A : Optional[int] , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
__magic_name__ : Union[str, Any] = copy.deepcopy(self.__dict__ )
__magic_name__ : Dict = self.vision_config.to_dict()
__magic_name__ : int = self.qformer_config.to_dict()
__magic_name__ : Optional[int] = self.text_config.to_dict()
__magic_name__ : Any = self.__class__.model_type
return output
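# Hedged usage sketch of the composite-config pattern above (the upstream name
# from_vision_qformer_text_configs is assumed for the obfuscated classmethod):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)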
| 331 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
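# Hedged usage sketch (upstream exposes this layer as TFGPT2Tokenizer; the
# checkpoint name is illustrative):
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   # batch["input_ids"] and batch["attention_mask"] are aligned tf tensors,
#   # padded to max_length when pad_token_id is set.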
| 318 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : Any = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
a_ : int = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
a_ : Any = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        """Compute CUAD scores for the given predictions against the references."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 55 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name):
    """Resolve a feature extractor class from its (string) name."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration (as a dict) from a local path or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory class that instantiates the right feature extractor through `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
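# Usage sketch (not part of the original module; assumes Hub access and that the
# checkpoint ships a preprocessor config):
#
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")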
| 318 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    """Return sinusoidal timestep embeddings for a 1-d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Projects a timestep embedding through two Dense layers with a SiLU in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
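# Minimal smoke test (a sketch, not from the original file; assumes jax/flax are
# installed and uses only the names defined above):
if __name__ == "__main__":
    import jax

    layer = FlaxTimesteps(dim=32)
    t = jnp.arange(4, dtype=jnp.float32)
    params = layer.init(jax.random.PRNGKey(0), t)  # no learnable params; returns an empty tree
    emb = layer.apply(params, t)
    assert emb.shape == (4, 32)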
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads the inputs for multiple choice."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
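# Usage sketch (hypothetical names, not from the original script): given `features`, a
# list of dicts each holding `num_choices` tokenized candidates plus a label,
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8)
#   batch = collator(features)
# yields batch["input_ids"] of shape (batch_size, num_choices, seq_len) and
# batch["labels"] of shape (batch_size,).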
def main():
    """Entry point: parse arguments, load data, then train and evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
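# Example invocation (a sketch; the flags come from the dataclasses above plus
# `TrainingArguments`, and the output path is illustrative):
#   python run_swag.py --model_name_or_path bert-base-uncased --do_train --do_eval \
#       --output_dir /tmp/swag_out --overwrite_output_dir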
| 318 | 0 |
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 264 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
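
    # Quick sanity check (added sketch; assumes both searches find a route on this grid,
    # so the first waypoint of each path is the start cell):
    assert bfs_path is not None and bfs_path[0] == init
    assert bd_bfs_path is not None and bd_bfs_path[0] == init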
| 318 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 188 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
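
# Quick numeric check (added sketch): SiLU(0) == 0, since sigmoid(0) == 0.5 and 0 * 0.5 == 0.
assert sigmoid_linear_unit(np.array([0.0]))[0] == 0.0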
| 318 | 0 |
'''simple docstring'''
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub("--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable this to debug everything but the run itself, fast, with fake metrics
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
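# Example invocation (a sketch; "<training command>" is a placeholder for the real
# script being benchmarked, the other flags are defined by the argument parser above):
#   python trainer-benchmark.py --base-cmd "<training command>" \
#       --target-metric-key train_samples_per_second \
#       --variations '|--fp16|--bf16' --repeat-times 2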
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Translate an original checkpoint key into the corresponding transformers key."""
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into our MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
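# Example invocation (a sketch; the script filename is illustrative, the flags are
# the ones declared above):
#   python convert_mobilevit.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small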
| 318 | 0 |
import operator
def strand_sort(arr, reverse=False, solution=None):
    """Strand sort: repeatedly peel off an ordered strand from `arr` and merge it into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
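    # Additional check (added sketch): duplicates are kept and placed correctly.
    assert strand_sort([5, 1, 4, 2, 4]) == [1, 2, 4, 4, 5]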
| 186 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
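
# Minimal example (added sketch): the longest non-decreasing subsequence of [3, 1, 2] is [1, 2].
assert longest_subsequence([3, 1, 2]) == [1, 2]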
| 318 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 98 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
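
# Migration sketch (added; the checkpoint name is an assumption, not taken from
# this file): downstream code avoids the warning by using the replacement class.
#
#   from transformers import OwlViTImageProcessor
#   processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")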
| 318 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    split = s.rsplit(old, occurrence)
    return new.join(split)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # sanity check: total parameter mass must survive the key renaming
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
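
# Example invocation (added sketch; the script filename is an assumption, and
# the URL is where OpenAI published the DALL-E encoder weights):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook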
| 319 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
A: Tuple = EfficientNetConfig()
A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim''']
A: Optional[int] = CONFIG_MAP[model_name]['''width_coef''']
A: str = CONFIG_MAP[model_name]['''depth_coef''']
A: Dict = CONFIG_MAP[model_name]['''image_size''']
A: str = CONFIG_MAP[model_name]['''dropout_rate''']
A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding''']
A: Optional[Any] = '''huggingface/label-files'''
A: List[str] = '''imagenet-1k-id2label.json'''
A: Dict = 1_0_0_0
A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()}
A: int = idalabel
A: Tuple = {v: k for k, v in idalabel.items()}
return config
def prepare_img():
A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
def convert_image_processor(model_name):
A: List[str] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , )
return preprocessor
def rename_keys(original_param_names):
A: List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
A: List[str] = sorted(set(__lowercase ) )
A: Dict = len(__lowercase )
A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )}
A: Optional[int] = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
A: Union[str, Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
A: str = '''efficientnet.''' + item[1]
A: int = '''classifier.weight'''
A: Tuple = '''classifier.bias'''
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
for key, value in tf_params.items():
if "normalization" in key:
continue
A: Union[str, Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
A: List[str] = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
A: List[Any] = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
A: Optional[Any] = torch.from_numpy(np.transpose(__lowercase ) )
else:
A: Any = torch.from_numpy(__lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowercase )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
A: Optional[int] = model_classes[model_name](
include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , )
A: List[str] = original_model.trainable_variables
A: Optional[Any] = original_model.non_trainable_variables
A: Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A: int = param.numpy()
A: Tuple = list(tf_params.keys() )
# Load HuggingFace model
A: Dict = get_efficientnet_config(__lowercase )
A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval()
A: Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
A: int = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
A: List[Any] = convert_image_processor(__lowercase )
A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
A: str = hf_model(**__lowercase )
A: List[Any] = outputs.logits.detach().numpy()
# Original model inference
A: Any = False
A: List[Any] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A: str = image.img_to_array(__lowercase )
A: Dict = np.expand_dims(__lowercase , axis=0 )
A: Any = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
A: int = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place, locating each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion index of ``val`` within the sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the tail of the sorted prefix one slot right, then drop ``val`` in
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
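    # Added check (sketch): duplicates and negatives are placed correctly too.
    assert binary_insertion_sort([3, -1, 3, 0]) == [-1, 0, 3, 3]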
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 1 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Project Euler 26: find the denominator d <= ``digit`` for which the unit
    fraction numerator/d produces the longest run of distinct remainders before
    the decimal expansion starts repeating."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
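    # Known result (added for reference): for denominators below 1000 the
    # longest recurring cycle belongs to 1/983, so this should print 983.
    print(solution())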
| 319 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns list [xmin, ymin, xmax, ymax] into dict {"xmin": xmin, ...}"""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
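
# Minimal usage sketch (added; assumes the public transformers pipeline()
# factory and the real "facebook/detr-resnet-50" checkpoint, neither of which
# appears in this file):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)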
| 319 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
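
# Note (added): with the defaults above, mask_ratio=0.75 hides 147 of the
# (224 / 16) ** 2 = 196 image patches during MAE pretraining, e.g.:
#
#   config = ViTMAEConfig(mask_ratio=0.6)  # mask a smaller fraction instead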
| 319 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 319 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 319 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """A set of side lengths can close into a polygon only if the longest side
    is strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
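
# Quick examples (added): a 3-4-5 triangle closes; a degenerate 1-1-3 one cannot.
assert check_polygon([3, 4, 5])
assert not check_polygon([1, 1, 3])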
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
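
# Instantiation sketch (added; the values are illustrative, not the defaults
# defined above):
#
#   config = LongformerConfig(attention_window=[256] * 12, max_position_embeddings=4098)
#   onnx_config = LongformerOnnxConfig(config)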
| 319 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 319 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers below ``n``."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
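
# Sanity check (added): 220 and 284 form the classic amicable pair.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220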
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 319 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    """Min-heap keyed on priority, with membership tracking so entries can be
    re-prioritised or removed."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
# euclidean distance
A: List[str] = np.array(__lowercase )
A: Optional[int] = np.array(__lowercase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# integer division by time variable
return consistent_heuristic(__lowercase , __lowercase ) // t
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase )
return ans
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
A: Union[str, Any] = np.chararray((n, n) )
for i in range(__lowercase ):
for j in range(__lowercase ):
A: Union[str, Any] = '''*'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (j, (n - 1) - i) in blocks:
A: Optional[Any] = '''#'''
A: Tuple = '''-'''
A: List[str] = back_pointer[goal]
while x != start:
((A) , (A)): Tuple = x
# print(x)
A: List[str] = '''-'''
A: str = back_pointer[x]
A: Dict = '''-'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
A: List[str] = back_pointer[goal]
while x != start:
print(__lowercase , end=''' ''' )
A: Optional[int] = back_pointer[x]
print(__lowercase )
sys.exit()
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
for itera in range(__lowercase ):
open_list[itera].remove_element(__lowercase )
# print("s", s)
# print("j", j)
((A) , (A)): Tuple = s
A: Optional[Any] = (x - 1, y)
A: str = (x + 1, y)
A: List[Any] = (x, y + 1)
A: int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowercase )
A: int = -1
A: int = float('''inf''' )
if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1:
A: List[str] = g_function[s] + 1
A: List[str] = s
if neighbours not in close_list_anchor:
open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowercase ):
if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key(
__lowercase , 0 , __lowercase , __lowercase ):
open_list[j].put(
__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
A: str = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase = make_common_ground()
UpperCamelCase = blocks_blk
# hyper parameters
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 20
UpperCamelCase = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase = (0, 0)
UpperCamelCase = (n - 1, n - 1)
UpperCamelCase = 1
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: int = {start: 0, goal: float('''inf''' )}
A: Union[str, Any] = {start: -1, goal: -1}
A: List[Any] = []
A: Union[str, Any] = set()
for i in range(__lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
A: list[int] = []
A: list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , __lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A , A: Union[str, Any] = open_list[i].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_inad.append(__lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A: Union[str, Any] = open_list[0].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_anchor.append(__lowercase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase = 1 , __lowercase = 1_0_0_0 ) -> int:
A: Any = 1
A: Optional[Any] = 0
for divide_by_number in range(__lowercase , digit + 1 ):
A: list[int] = []
A: List[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowercase ):
A: Any = len(__lowercase )
A: Dict = divide_by_number
else:
has_been_divided.append(__lowercase )
A: str = now_divide * 1_0 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
UpperCamelCase = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict:
A: Tuple = EfficientNetConfig()
A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim''']
A: Optional[int] = CONFIG_MAP[model_name]['''width_coef''']
A: str = CONFIG_MAP[model_name]['''depth_coef''']
A: Dict = CONFIG_MAP[model_name]['''image_size''']
A: str = CONFIG_MAP[model_name]['''dropout_rate''']
A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding''']
A: Optional[Any] = '''huggingface/label-files'''
A: List[str] = '''imagenet-1k-id2label.json'''
A: Dict = 1_0_0_0
A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()}
A: int = idalabel
A: Tuple = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE( ) -> Any:
A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
A: List[str] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , )
return preprocessor
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
A: List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
A: List[str] = sorted(set(__lowercase ) )
A: Dict = len(__lowercase )
A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )}
A: Optional[int] = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
A: Union[str, Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
A: str = '''efficientnet.''' + item[1]
A: int = '''classifier.weight'''
A: Tuple = '''classifier.bias'''
return key_mapping
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
A: Union[str, Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
            A: List[str] = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
            A: List[Any] = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
            A: Optional[Any] = torch.from_numpy(np.transpose(value ) )
else:
            A: Any = torch.from_numpy(value )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowercase )
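# Shape sanity check for the kernel permutes above (standalone, illustrative):
#   import torch
#   k = torch.zeros(3, 3, 16, 32)  # TF conv kernel laid out as (H, W, C_in, C_out)
#   assert k.permute(3, 2, 0, 1).shape == (32, 16, 3, 3)  # PyTorch (C_out, C_in, H, W)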
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
A: Optional[int] = model_classes[model_name](
include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , )
A: List[str] = original_model.trainable_variables
A: Optional[Any] = original_model.non_trainable_variables
A: Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A: int = param.numpy()
A: Tuple = list(tf_params.keys() )
# Load HuggingFace model
A: Dict = get_efficientnet_config(__lowercase )
A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval()
A: Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
A: int = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
A: List[Any] = convert_image_processor(__lowercase )
A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
A: str = hf_model(**__lowercase )
A: List[Any] = outputs.logits.detach().numpy()
# Original model inference
A: Any = False
A: List[Any] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A: str = image.img_to_array(__lowercase )
A: Dict = np.expand_dims(__lowercase , axis=0 )
A: Any = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
A: int = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
UpperCamelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
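    # Invocation sketch (script file name is a placeholder; the defaults above
    # convert EfficientNet-B0):
    #   python convert_efficientnet.py --model_name b0 \
    #       --pytorch_dump_folder_path hf_model --save_model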
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
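    # Usage sketch: with the lazy module above, the framework-specific classes are
    # only imported on first attribute access, e.g. (assuming torch is installed):
    #   from transformers import VisionEncoderDecoderModel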
| 319 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = False
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
UpperCamelCase = parser.parse_args()
UpperCamelCase = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
UpperCamelCase = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
UpperCamelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
UpperCamelCase = reader.read()
UpperCamelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
UpperCamelCase = UNetaDModel(**config)
else:
UpperCamelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
UpperCamelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCamelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCamelCase = config[key]
del config[key]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
UpperCamelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
UpperCamelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
UpperCamelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
UpperCamelCase = param_value
UpperCamelCase = True
if not has_changed:
UpperCamelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
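    # Invocation sketch (script and repo names are placeholders; the module-level
    # flags above select config-only, rename-only or weights conversion):
    #   python convert_unet.py --repo_path ./old-unet-repo --dump_path ./converted-unet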
| 319 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any:
A: Any = [x.strip() for x in open(__lowercase ).readlines()]
A: Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
A: Union[str, Any] = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
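    # Invocation sketch via python-fire (file names are placeholders):
    #   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json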
| 319 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
A: str = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]:
if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
A: Union[str, Any] = len(__lowercase )
A: str = matrix_length // 2
A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )]
A: Optional[Any] = [
[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase )
]
A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )]
A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]:
return len(__lowercase ), len(matrix[0] )
def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
print('''\n'''.join(str(__lowercase ) for line in matrix ) )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase ) == (2, 2):
return default_matrix_multiplication(__lowercase , __lowercase )
A , A , A , A: Union[str, Any] = split_matrix(__lowercase )
A , A , A , A: List[Any] = split_matrix(__lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
A: Any = matrix_addition(__lowercase , __lowercase )
A: List[Any] = matrix_addition(__lowercase , __lowercase )
A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
# construct the new matrix from our 4 quadrants
A: Union[str, Any] = []
for i in range(len(__lowercase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__lowercase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase )[1] != matrix_dimensions(__lowercase )[0]:
A: int = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(__lowercase )
A: str = matrix_dimensions(__lowercase )
A: str = matrix_dimensions(__lowercase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
A: Union[str, Any] = max(*__lowercase , *__lowercase )
A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) )
A: List[Any] = matrixa
A: Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
A: Any = actual_strassen(__lowercase , __lowercase )
# Removing the additional zeros
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
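    # Quick sanity check (illustrative): Strassen agrees with the naive 2x2 base case,
    #   strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]])  # -> [[19, 22], [43, 50]]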
| 319 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = 0 ) -> list:
A: Dict = length or len(__lowercase )
A: Dict = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
A , A: Tuple = list_data[i + 1], list_data[i]
A: Union[str, Any] = True
return list_data if not swapped else bubble_sort(__lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
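    # Illustrative usage, assuming the recursive sort above is exposed as `bubble_sort`:
    #   bubble_sort([4, 2, 5, 1])  # -> [1, 2, 4, 5]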
| 319 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
if args.student_type == "roberta":
A: Union[str, Any] = False
elif args.student_type == "gpt2":
A: str = False
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
if args.student_type == "roberta":
A: Optional[Any] = False
def SCREAMING_SNAKE_CASE( ) -> Dict:
A: str = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__lowercase , required=__lowercase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__lowercase , required=__lowercase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__lowercase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowercase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__lowercase , required=__lowercase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__lowercase , type=__lowercase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowercase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__lowercase , required=__lowercase , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=__lowercase , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__lowercase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__lowercase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowercase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowercase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__lowercase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=__lowercase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__lowercase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__lowercase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__lowercase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__lowercase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__lowercase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__lowercase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__lowercase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowercase , default=5_0 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__lowercase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowercase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__lowercase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowercase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__lowercase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__lowercase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__lowercase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__lowercase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__lowercase , default=5_6 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__lowercase , default=5_0_0 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__lowercase , default=4_0_0_0 , help='''Checkpoint interval.''' )
A: Tuple = parser.parse_args()
sanity_checks(__lowercase )
# ARGS #
init_gpu_params(__lowercase )
set_seed(__lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__lowercase ) , __lowercase , indent=4 )
git_log(args.dump_path )
A , A , A: int = MODEL_CLASSES[args.student_type]
A , A , A: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
A: Optional[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
A: Union[str, Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        A: List[Any] = tokenizer.all_special_tokens.index(tok_symbol )
A: Tuple = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
A: List[str] = special_tok_ids
A: Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
A: int = pickle.load(__lowercase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
A: Any = pickle.load(__lowercase )
A: Dict = np.maximum(__lowercase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
A: int = 0.0 # do not predict special tokens
A: int = torch.from_numpy(__lowercase )
else:
A: Tuple = None
A: Optional[int] = LmSeqsDataset(params=__lowercase , data=__lowercase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
A: Dict = student_config_class.from_pretrained(args.student_config )
A: Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
A: List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowercase )
else:
A: Any = student_model_class(__lowercase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
A: Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowercase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__lowercase , __lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__lowercase , __lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
A: Optional[int] = Distiller(
params=__lowercase , dataset=__lowercase , token_probs=__lowercase , student=__lowercase , teacher=__lowercase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
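    # Launch sketch (paths are placeholders; the flag combination satisfies the
    # asserts in sanity_checks above, i.e. MLM distillation of BERT into DistilBERT):
    #   python train.py --force --dump_path serialization_dir/my_run \
    #       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
    #       --student_type distilbert --student_config training_configs/distilbert.json \
    #       --teacher_type bert --teacher_name bert-base-uncased \
    #       --mlm --alpha_mlm 0.5 --alpha_clm 0.0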
| 319 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
UpperCamelCase = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
A: List[Any] = torch.load(__lowercase , map_location='''cpu''' )
return sd
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]:
A: Tuple = OrderedDict()
A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A: int = key
for name_pair in rename_keys_prefix:
A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
A: Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A: int = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
A: Optional[Any] = '''pretraining'''
if "vcr" in checkpoint_path:
A: Optional[int] = {'''visual_embedding_dim''': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8}
elif "vqa" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 2_0_4_8}
elif "nlvr" in checkpoint_path:
A: Tuple = {'''visual_embedding_dim''': 1_0_2_4}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 5_1_2}
A: List[str] = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
A: List[str] = {'''visual_embedding_dim''': 2_0_4_8}
A: Optional[int] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
A: Union[str, Any] = '''vqa'''
elif "nlvr" in checkpoint_path:
A: Optional[int] = {
'''visual_embedding_dim''': 1_0_2_4,
'''num_labels''': 2,
}
A: str = '''nlvr'''
A: Union[str, Any] = VisualBertConfig(**__lowercase )
# Load State Dict
A: Union[str, Any] = load_state_dict(__lowercase )
A: str = get_new_dict(__lowercase , __lowercase )
if model_type == "pretraining":
A: Optional[Any] = VisualBertForPreTraining(__lowercase )
elif model_type == "vqa":
A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase )
elif model_type == "nlvr":
A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase )
elif model_type == "multichoice":
A: Any = VisualBertForMultipleChoice(__lowercase )
model.load_state_dict(__lowercase )
# Save Checkpoints
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
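    # Invocation sketch (script name is a placeholder; the checkpoint file must be
    # one of ACCEPTABLE_CHECKPOINTS above):
    #   python convert_visual_bert.py checkpoints/vqa_fine_tuned.th ./visualbert-vqa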
| 319 | 1 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE( __lowercase = 1_0_0 ) -> int:
A: List[Any] = sum(i * i for i in range(1 , n + 1 ) )
A: Dict = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
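    # Worked example: for n = 10 the sum of squares is 385 and the square of the
    # sum is 55**2 = 3025, so the difference returned is 3025 - 385 = 2640.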
| 319 |
'''simple docstring'''
from itertools import permutations
def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A: int = [7, 1_1, 1_3, 1_7]
for i, test in enumerate(__lowercase ):
if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase = 1_0 ) -> int:
return sum(
        int(''''''.join(map(str , num ) ) )
for num in permutations(range(__lowercase ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'{solution() = }')
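    # Example from the problem statement: 1406357289 is 0-9 pandigital and each of
    # its three-digit substrings d2d3d4 .. d8d9d10 is divisible by 2, 3, 5, 7, 11,
    # 13 and 17 in turn, which is exactly the property checked above.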
| 319 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase = '''base_with_context'''
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]:
A: Any = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
A: str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
A: str = weights[F"""layers_{lyr_num}"""]
A: Any = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
A: List[Any] = ly_weight['''attention''']
A: List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
A: Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
A: Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
A: str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
A: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
A: int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
A: List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
A: Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
A: Dict = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
A: List[str] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
A: Tuple = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
A: List[Any] = weights[F"""layers_{lyr_num}"""]
A: Dict = ly_weight['''attention''']
A: Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
A: int = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
A: Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
A: Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
A: Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
A: Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
A: Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
A: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
A: str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
A: Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[int]:
A: Any = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
A: Optional[int] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
A: str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowercase )
A: Dict = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
A: str = weights[F"""layers_{lyr_num}"""]
A: Any = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
A: Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
A: List[Any] = ly_weight['''self_attention''']
A: str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
A: Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
A: Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
A: int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
A: Optional[Any] = ly_weight['''MultiHeadDotProductAttention_0''']
A: int = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
A: Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
A: int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
A: Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
A: Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
A: Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
A: Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
A: Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
A: Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
A: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
A: List[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
A: Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
A: int = checkpoints.load_tax_checkpoint(args.checkpoint_path )
A: Dict = jnp.tree_util.tree_map(onp.array , __lowercase )
A: int = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
A: List[str] = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
A: str = inference.parse_training_gin_file(__lowercase , __lowercase )
A: int = inference.InferenceModel(args.checkpoint_path , __lowercase )
A: Union[str, Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
A: str = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
A: int = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
A: List[str] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
A: Any = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __lowercase )
A: List[Any] = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __lowercase )
A: Any = load_decoder(ta_checkpoint['''target''']['''decoder'''] , __lowercase )
A: Union[str, Any] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
A: Dict = SpectrogramDiffusionPipeline(
notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
UpperCamelCase = parser.parse_args()
main(args)
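    # Invocation sketch (checkpoint path is a placeholder pointing at a T5X
    # checkpoint directory):
    #   python convert_spectrogram_diffusion.py \
    #       --checkpoint_path ./music_spectrogram_diffusion/checkpoint_500000 \
    #       --output_path ./spectrogram-diffusion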
| 319 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
UpperCamelCase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
UpperCamelCase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def SCREAMING_SNAKE_CASE( ) -> Dict:
A: Dict = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
A: Union[str, Any] = bs[:]
A: List[str] = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    A: List[Any] = [chr(n ) for n in cs]
return dict(zip(__lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
A: Optional[Any] = set()
A: Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A: List[Any] = char
return pairs
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = VOCAB_FILES_NAMES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""]
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]:
'''simple docstring'''
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
A: str = json.load(SCREAMING_SNAKE_CASE_ )
A: str = {v: k for k, v in self.encoder.items()}
A: Union[str, Any] = errors # how to handle errors in decoding
A: Optional[int] = bytes_to_unicode()
A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
A: int = merges_handle.read().split('''\n''' )[1:-1]
A: str = [tuple(merge.split() ) for merge in bpe_merges]
A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Union[str, Any] = {}
A: Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A: str = tuple(SCREAMING_SNAKE_CASE_ )
A: str = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A , A: Optional[Any] = bigram
A: Tuple = []
A: List[Any] = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
A: Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A: int = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
A: Any = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ )
A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ )
A: str = word
return word
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A: Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
A: Tuple = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) )
return bpe_tokens
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ )
A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A: Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A: int = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
A: Any = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
A: Union[str, Any] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A: int = [self.cls_token_id]
A: str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A: Dict = [self.sep_token_id]
A: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
'''simple docstring'''
A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
A: List[Any] = ''' ''' + text
return (text, kwargs)
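# Usage sketch (the class above is LongformerTokenizer in the original source;
# loading the hosted vocab files requires network access):
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok("Hello world")["input_ids"]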
| 319 | 1 |
'''simple docstring'''
import functools
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# Validation
if not isinstance(__lowercase , __lowercase ) or not all(isinstance(__lowercase , __lowercase ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(__lowercase ) != 3 or not all(isinstance(__lowercase , __lowercase ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(__lowercase ) == 0:
return 0
if min(__lowercase ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(__lowercase ) >= 3_6_6:
raise ValueError('''All days elements should be less than 366''' )
A: Optional[int] = set(__lowercase )
@functools.cache
def dynamic_programming(__lowercase ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
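    # Worked example (classic instance): for days = [1, 4, 6, 7, 8, 20] and
    # costs = [2, 7, 15] the optimum is 11: a 1-day pass on day 1, a 7-day pass
    # covering days 4-10, and another 1-day pass on day 20.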
| 319 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
if not isinstance(__lowercase , __lowercase ):
raise TypeError('''only integers accepted as input''' )
else:
A: str = str(abs(__lowercase ) )
A: int = [list(__lowercase ) for char in range(len(__lowercase ) )]
for index in range(len(__lowercase ) ):
            num_transpositions[index].pop(index )
return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
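    # Illustrative example: for 9132 the candidates after dropping one digit are
    # 132, 932, 912 and 913, so the maximum returned is 932.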
| 319 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[str] ) -> None:
'''simple docstring'''
A: Tuple = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
A: str = Vector()
def _snake_case ( self : List[Any] ) -> None:
'''simple docstring'''
A: Dict = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , '''(0,0,0,0,0,1)''' )
def _snake_case ( self : int ) -> None:
'''simple docstring'''
A: Optional[int] = Vector([1, 2, 3, 4] )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
def _snake_case ( self : List[str] ) -> None:
'''simple docstring'''
A: Any = Vector([1, 2] )
A: str = Vector([1, 2, 3, 4, 5] )
A: Dict = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
A: int = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _snake_case ( self : Any ) -> None:
'''simple docstring'''
A: List[Any] = Vector([1, 2, 3] )
A: List[str] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _snake_case ( self : List[Any] ) -> None:
'''simple docstring'''
A: Any = Vector([1, 2, 3] )
A: Tuple = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _snake_case ( self : Dict ) -> None:
'''simple docstring'''
A: List[str] = Vector([1, 2, 3] )
A: Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
A: Tuple = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def _snake_case ( self : str ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def _snake_case ( self : str ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def _snake_case ( self : str ) -> None:
'''simple docstring'''
A: Tuple = Vector([1, 2, 3] )
A: Any = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , '''(3,4,7)''' )
def _snake_case ( self : str ) -> None:
'''simple docstring'''
A: Optional[int] = Vector([1, 0, 0, 0, 0, 0] )
A: str = x.copy()
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , str(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Any ) -> None:
'''simple docstring'''
A: Optional[Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , '''(0,1,0)''' )
def _snake_case ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A: Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Optional[int] ) -> None:
'''simple docstring'''
A: Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A: Union[str, Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Any ) -> None:
'''simple docstring'''
A: Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A: str = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : int ) -> None:
'''simple docstring'''
A: Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _snake_case ( self : Optional[int] ) -> None:
'''simple docstring'''
A: List[str] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
A: Any = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def _snake_case ( self : List[Any] ) -> None:
'''simple docstring'''
A: Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Optional[int] ) -> None:
'''simple docstring'''
A: Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def _snake_case ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A: List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A: Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def _snake_case ( self : List[Any] ) -> None:
'''simple docstring'''
A: Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A: Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def _snake_case ( self : Union[str, Any] ) -> None:
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
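# The suite above imports Vector/Matrix from a sibling `.lib` module that is
# not shown here. As a hedged illustration of the interface the assertions
# assume, here is a tiny stand-in covering a few Vector behaviours (the real
# library also provides Matrix, zero_vector, unit_basis_vector, axpy and
# square_zero_matrix):
import math


class _VectorSketch:
    def __init__(self, components=None):
        self.components = list(components) if components else []

    def component(self, i):
        return self.components[i]

    def __len__(self):
        return len(self.components)

    def __str__(self):
        return '(' + ','.join(str(c) for c in self.components) + ')'

    def __add__(self, other):
        return _VectorSketch([a + b for a, b in zip(self.components, other.components)])

    def euclidean_length(self):
        return math.sqrt(sum(c * c for c in self.components))


assert str(_VectorSketch([0, 0, 0, 0, 0, 1])) == '(0,0,0,0,0,1)'
assert (_VectorSketch([1, 2, 3]) + _VectorSketch([1, 1, 1])).component(2) == 4
assert abs(_VectorSketch([1, 2]).euclidean_length() - 2.236) < 1e-3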
| 319 |
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
A: str = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]:
if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
A: Union[str, Any] = len(__lowercase )
A: str = matrix_length // 2
A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )]
A: Optional[Any] = [
[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase )
]
A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )]
A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )]
return top_left, top_right, bot_left, bot_right
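# Quick illustration of the splitter above (referring to it by its evident
# original name, split_matrix; every helper in this file shares one renamed
# identifier, so this is shown as a comment rather than a live call):
#
#   split_matrix([[ 1,  2,  3,  4],
#                 [ 5,  6,  7,  8],
#                 [ 9, 10, 11, 12],
#                 [13, 14, 15, 16]])
#   -> ([[1, 2], [5, 6]],        # top_left
#       [[3, 4], [7, 8]],        # top_right
#       [[9, 10], [13, 14]],     # bot_left
#       [[11, 12], [15, 16]])    # bot_right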
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]:
return len(__lowercase ), len(matrix[0] )
def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
print('''\n'''.join(str(__lowercase ) for line in matrix ) )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase ) == (2, 2):
return default_matrix_multiplication(__lowercase , __lowercase )
A , A , A , A: Union[str, Any] = split_matrix(__lowercase )
A , A , A , A: List[Any] = split_matrix(__lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
A: Any = matrix_addition(__lowercase , __lowercase )
A: List[Any] = matrix_addition(__lowercase , __lowercase )
A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
# construct the new matrix from our 4 quadrants
A: Union[str, Any] = []
for i in range(len(__lowercase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__lowercase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase )[1] != matrix_dimensions(__lowercase )[0]:
A: int = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(__lowercase )
A: str = matrix_dimensions(__lowercase )
A: str = matrix_dimensions(__lowercase )
A: Union[str, Any] = max(*__lowercase , *__lowercase )
A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) )
A: List[Any] = matrixa
A: Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
A: Any = actual_strassen(__lowercase , __lowercase )
# Removing the additional zeros
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
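# A sanity check one could run against this module (referring to the top-level
# entry point by its evident name, strassen; numpy is used only to compute the
# reference product):
#
#   import numpy as np
#   a = [[2, 3, 4, 5], [6, 4, 3, 1]]                                   # 2 x 4
#   b = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]   # 4 x 4
#   assert strassen(a, b) == (np.array(a) @ np.array(b)).tolist()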
| 319 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
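# What the lazy structure above buys in practice (a sketch; these symbols ship
# with transformers releases that include FocalNet): importing the config never
# touches torch, while the model classes resolve only when torch is installed.
#
#   from transformers import FocalNetConfig                    # always importable
#   from transformers import FocalNetForImageClassification   # requires torch
#
#   model = FocalNetForImageClassification(FocalNetConfig())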
| 319 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
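# Hedged usage sketch for the stochastic sampler above (it mirrors diffusers'
# KarrasVeScheduler; method and attribute names below follow that API, and the
# `unet` call with its input scaling is model-specific and purely illustrative):
#
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.config.sigma_max
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample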
| 319 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
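# Pure-python restatement of the schedule built in set_timesteps above: a
# geometric interpolation between sigma_max**2 and sigma_min**2, listed in the
# reversed-timestep order the scheduler iterates in.
#
#   sigma_min, sigma_max, n = 0.02, 100.0, 50
#   schedule = [
#       sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1))
#       for i in reversed(range(n))
#   ]
#   # schedule[0] == sigma_min**2 == 4e-4; schedule[-1] == sigma_max**2 == 1e4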
| 319 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]:
with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f:
A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())]
assert len(__lowercase ) == len(__lowercase )
A: Optional[int] = {c: dataset[c] for c in dataset.column_names}
A: Union[str, Any] = refs
return Dataset.from_dict(__lowercase )
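# Shape of the inputs the helper above expects (toy values, purely
# illustrative): the ref file is JSON lines, one list of whole-word-mask
# sub-token positions per dataset row, and in the upstream script the new
# column is named "chinese_ref".
#
#   ref file line 0:   [2, 3]
#   ref file line 1:   [1]
#   row 0 before:      {"input_ids": [...]}
#   row 0 after:       {"input_ids": [...], "chinese_ref": [2, 3]}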
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
A: List[str] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowercase ):
# Remove empty lines
A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, we need to keep them from being dropped by the Trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
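# Minimal sketch of the whole-word-mask collator this script is built around
# (checkpoint name illustrative; the first use downloads it):
#
#   from transformers import AutoTokenizer, DataCollatorForWholeWordMask
#
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorForWholeWordMask(tokenizer=tok, mlm_probability=0.15)
#   batch = collator([tok("a subwordy example"), tok("another line")])
#   # batch["labels"] is -100 everywhere except tokens masked as whole words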
| 319 | 1 |
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase = tuple[int, int]
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> str:
'''simple docstring'''
A: Any = []
A: int = set()
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE_ )
else:
# update
# print("update", item)
A: Optional[int] = []
((A) , (A)): str = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((A) , (A)): int = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
'''simple docstring'''
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE_ )
A: str = []
((A) , (A)): List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((A) , (A)): Any = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.elements[0][1]
def _snake_case ( self : int ) -> Union[str, Any]:
'''simple docstring'''
((A) , (A)): Dict = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE_ )
return (priority, item)
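# Quick illustration of the queue above (by its evident original name,
# PriorityQueue, which the search code below still uses): put() inserts new
# items and re-prioritises known ones; remove_element() deletes by item.
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 3)
#   pq.put((0, 0), 1)        # update: (0, 0) now outranks (1, 1)
#   assert pq.minkey() == 1 and pq.top_show() == (0, 0)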
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
# euclidean distance
A: List[str] = np.array(__lowercase )
A: Optional[int] = np.array(__lowercase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# integer division by time variable
return consistent_heuristic(__lowercase , __lowercase ) // t
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase )
return ans
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
A: Union[str, Any] = np.chararray((n, n) )
for i in range(__lowercase ):
for j in range(__lowercase ):
A: Union[str, Any] = '''*'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (j, (n - 1) - i) in blocks:
A: Optional[Any] = '''#'''
A: Tuple = '''-'''
A: List[str] = back_pointer[goal]
while x != start:
((A) , (A)): Tuple = x
# print(x)
A: List[str] = '''-'''
A: str = back_pointer[x]
A: Dict = '''-'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
A: List[str] = back_pointer[goal]
while x != start:
print(__lowercase , end=''' ''' )
A: Optional[int] = back_pointer[x]
print(__lowercase )
sys.exit()
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
for itera in range(__lowercase ):
open_list[itera].remove_element(__lowercase )
# print("s", s)
# print("j", j)
((A) , (A)): Tuple = s
A: Optional[Any] = (x - 1, y)
A: str = (x + 1, y)
A: List[Any] = (x, y + 1)
A: int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowercase )
A: int = -1
A: int = float('''inf''' )
if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1:
A: List[str] = g_function[s] + 1
A: List[str] = s
if neighbours not in close_list_anchor:
open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowercase ):
if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key(
__lowercase , 0 , __lowercase , __lowercase ):
open_list[j].put(
__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
A: str = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase = make_common_ground()
UpperCamelCase = blocks_blk
# hyper parameters
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 20
UpperCamelCase = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase = (0, 0)
UpperCamelCase = (n - 1, n - 1)
UpperCamelCase = 1
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: int = {start: 0, goal: float('''inf''' )}
A: Union[str, Any] = {start: -1, goal: -1}
A: List[Any] = []
A: Union[str, Any] = set()
for i in range(__lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
A: list[int] = []
A: list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , __lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
                    A: Union[str, Any] = open_list[i].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_inad.append(__lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A: Union[str, Any] = open_list[0].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_anchor.append(__lowercase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
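# How the weighted key above plays out at the start state with the module
# defaults (both weights are 1; heuristic 0 is the consistent euclidean one):
#
#   g = {start: 0, goal: float("inf")}
#   key(start, 0, goal, g) == 0 + Wa * consistent_heuristic(start, goal)
#                          == (19**2 + 19**2) ** 0.5          # ~ 26.87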
| 319 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer
UpperCamelCase_ : Tuple = False
def _snake_case ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
A: Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
A: int = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
while len(SCREAMING_SNAKE_CASE_ ) < min_length:
A: Dict = toks + toks
# toks_str = [t[1] for t in toks]
A: Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
A: int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
)
if with_prefix_space:
A: Tuple = ''' ''' + output_txt
A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
return output_txt, output_ids
def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Any = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
A: str = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
A: Dict = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
[recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
A: int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # that the output type is correct and the output is identical to `decode`
# char
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
    def _snake_case ( self : Tuple ) -> Any:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer )
                self.assertNotEqual(vocab_size_a , 0 )
                self.assertEqual(vocab_size , vocab_size_a )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_a , all_size + len(new_toks ) )
                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_b = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_b = tokenizer.add_special_tokens(new_toks_b )
                vocab_size_b = tokenizer.vocab_size
                all_size_b = len(tokenizer )
                self.assertNotEqual(vocab_size_b , 0 )
                self.assertEqual(vocab_size , vocab_size_b )
                self.assertEqual(added_toks_b , len(new_toks_b ) )
                self.assertEqual(all_size_b , all_size_a + len(new_toks_b ) )
                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
        tokenizers = self.get_tokenizers(fast=False , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                tokens = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
                output = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(output['''text'''] , str )
| 319 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
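    # fall back to a dummy placeholder that raises a clear "missing backend"
    # error at instantiation time instead of failing here at import time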
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 319 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BeitImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 319 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ["""flax"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Any = ["""flax"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = ["""flax"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Any = ["""flax"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Any , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ["""flax"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = ["""flax"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ["""flax"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Any = ["""flax"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : int , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : int , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ["""flax"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = ["""flax"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ["""flax"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Any = ["""flax"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : int ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class lowerCAmelCase_ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ["""flax"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def _snake_case ( cls : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def _snake_case ( cls : str , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
| 319 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def SCREAMING_SNAKE_CASE( tmp_path , case ) -> Dict:
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
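# Every parametrized case is expected to yield exactly ["os"]: the statically
# parsed, unguarded import. Imports inside a try/except block (optional
# dependencies like `bar`/`baz` above) are deliberately skipped by the parser.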
| 319 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search( graph , start ) -> set[str]:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
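# Note: a vertex can be pushed onto the stack several times before it is first
# popped; the `adj not in explored` check keeps the traversal correct, at the
# cost of a few redundant stack entries.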
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 319 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> Optional[Any]:
A: str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
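# The list built above holds plain (old_name, new_name) string pairs, e.g.
# ("transformer.blocks.0.norm1.weight", "vilt.encoder.layer.0.layernorm_before.weight").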
def read_in_q_k_v( state_dict , config ) -> Any:
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
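# The fused timm projection stacks query, key and value row-wise in a
# (3 * hidden_size, hidden_size) matrix, so the three equal slices above
# recover q, k and v in that order.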
def remove_classification_head_( state_dict ) -> int:
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> int:
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ) -> str:
    config = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_1_2_9
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.idalabel = {0: '''False''', 1: '''True'''}
        config.labelaid = {v: k for k, v in config.idalabel.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
# Define processor
    image_processor = ViltImageProcessor(size=3_8_4 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor , tokenizer )
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image_b = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_a = processor(image_a , text , return_tensors='''pt''' )
        encoding_b = processor(image_b , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_b.pixel_values , )
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 319 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
    def __init__( self , segmentation_model: CLIPSegForImageSegmentation , segmentation_processor: CLIPSegProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ) -> None:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> Optional[Any]:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> List[Any]:
        '''simple docstring'''
        self.enable_attention_slicing(None )
def _snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , image: Union[torch.FloatTensor, PIL.Image.Image] , text: str , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
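# Rough usage sketch (the checkpoint ids below are illustrative assumptions,
# not prescribed by this file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   result = pipe(prompt="a red couch", image=init_image, text="the old couch")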
| 319 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
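# Per-variant hyperparameters follow EfficientNet's compound scaling: channel
# counts grow with `width_coef`, per-stage block repeats with `depth_coef`,
# and input resolution with `image_size`.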
def get_efficientnet_config( model_name ) -> Dict:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img() -> Any:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ) -> Tuple:
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ) -> Optional[int]:
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    # the final dense layer of the Keras model is named "predictions"
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ) -> Tuple:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are laid out HWIO; PyTorch expects OIHW
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # depthwise kernels store the channel multiplier last in TF
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # dense kernels only need a transpose
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ) -> Tuple:
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        hub_model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(hub_model_name )
        hf_model.push_to_hub(hub_model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = VideoToVideoSDPipeline
UpperCamelCase_ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCamelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCamelCase_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase_ : Any = False
# No `output_type`.
UpperCamelCase_ : str = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ) -> List[Any]:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> int:
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''video''': video,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
def _snake_case ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self : Any ) -> int:
'''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _snake_case ( self : Tuple ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _snake_case ( self : Tuple ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
        pipe = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) , generator=generator )
        video = video.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='''pt''' ).frames
        expected_array = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
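# _LazyModule defers the heavy torch-dependent imports until an attribute is
# first accessed, so importing the package stays cheap.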
| 319 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ) -> str:
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ) -> Union[str, Any]:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    # the decoder bias is tied, and position ids are rebuilt with the model's offset of 2
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ) -> Optional[Any]:
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
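# Example invocation (script and file names are placeholders, not shipped artifacts):
#   python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json --pytorch_dump_path ./yoso-hf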
| 319 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> int:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
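

# A minimal usage sketch (added for illustration; not part of the original module).
# The checkpoint name is an assumption -- any object-detection checkpoint on the Hub
# should behave the same way when reached through the high-level `pipeline` factory:
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     predictions = detector("cats.png", threshold=0.9)
#     # -> [{"score": 0.99, "label": "cat",
#     #      "box": {"xmin": 10, "ymin": 20, "xmax": 350, "ymax": 420}}, ...]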
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
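

# A short usage sketch (added for illustration; not in the original file): the default
# constructor arguments above reproduce the YituTech/conv-bert-base architecture, and
# the ONNX config exposes the dynamic axes used at export time:
#
#     config = ConvBertConfig()
#     onnx_config = ConvBertOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict of input_ids / attention_mask / token_type_ids axes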
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
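

# A small worked check (added for illustration): 4150 is one of the numbers counted
# above, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
#
#     assert digits_fifth_powers_sum(4150) == 4150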
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
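

# A quick usage sketch (added for illustration): a 3-4-5 triangle satisfies the polygon
# inequality (the longest side is shorter than the sum of the others), a 1-1-5 "triangle"
# does not.
#
#     assert check_polygon([3, 4, 5]) is True
#     assert check_polygon([1, 1, 5]) is False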
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The column name of the images in the files."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the training data."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the validation data."""} )
UpperCamelCase_ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _snake_case ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = {}
if self.train_dir is not None:
A: int = self.train_dir
if self.validation_dir is not None:
A: Any = self.validation_dir
A: List[Any] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : str = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : str = field(default=UpperCAmelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase_ : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A: int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
A: Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
A: Any = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
A: int = ds['''train'''].train_test_split(data_args.train_val_split )
A: Optional[Any] = split['''train''']
A: List[str] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: Any = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: Dict = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: Optional[int] = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
A: Any = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
elif model_args.model_name_or_path:
A: Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: List[str] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
A: Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: Any = ViTMAEForPreTraining(__lowercase )
if training_args.do_train:
A: Optional[Any] = ds['''train'''].column_names
else:
A: Optional[int] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
A: str = data_args.image_column_name
elif "image" in column_names:
A: List[Any] = '''image'''
elif "img" in column_names:
A: str = '''img'''
else:
A: Dict = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
A: str = image_processor.size['''shortest_edge''']
else:
A: Any = (image_processor.size['''height'''], image_processor.size['''width'''])
A: Optional[Any] = Compose(
[
Lambda(lambda __lowercase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__lowercase ):
A: Tuple = [transforms(__lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A: Tuple = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A: List[str] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Compute absolute learning rate
A: str = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
A: List[str] = training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
A: Tuple = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
A: Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
A: List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A: List[str] = last_checkpoint
A: Optional[Any] = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A: Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , __lowercase )
trainer.save_metrics('''eval''' , __lowercase )
# Write model card and (optionally) push to hub
A: List[Any] = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
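

# How the custom flag is used in practice (added for illustration): a doctest in the
# code base can opt out of output checking with the standard directive syntax, e.g.
#
#     >>> some_function_with_nondeterministic_output()  # doctest: +IGNORE_RESULT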
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
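

# A tiny usage sketch of the queue above (added for illustration): `put` either inserts
# a new item or re-prioritises an existing one, and `get` pops the lowest-priority entry.
#
#     pq = PriorityQueue()
#     pq.put((0, 0), 5)
#     pq.put((1, 1), 3)
#     pq.put((0, 0), 1)              # updates the priority of (0, 0)
#     assert pq.get() == (1, (0, 0))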
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1  # heuristic weight used throughout this version
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
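

# A small usage example (added for illustration):
#
#     >>> bubble_sort([0, 5, 2, 3, 2])
#     [0, 2, 2, 3, 5]
#     >>> bubble_sort([-2, -45, -5])
#     [-45, -5, -2]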
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
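

# A small worked check (added for illustration): among d <= 10, 1/7 = 0.(142857) has
# the longest recurring cycle, so the search should return 7.
#
#     assert solution(1, 10) == 7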
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
A: List[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
A: Optional[Any] = picked_neighbor
else:
A: List[str] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
A: Any = picked_neighbor
A: Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
A: Dict = True
else:
A: Tuple = next_state
if visualization:
from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
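

# What the lazy structure above buys (added for illustration): importing the package is
# cheap, and heavyweight submodules are only materialised on first attribute access, e.g.
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig
#     # the torch/TF/flax modeling files are not imported until their classes are requested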
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
UpperCamelCase_ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A: Optional[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Dict = trainer.evaluate()
A: Any = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowercase , __lowercase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowercase )
# Predict
if training_args.do_predict:
A: Any = TokenClassificationDataset(
token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
A , A , A: List[Any] = trainer.predict(__lowercase )
A , A: Tuple = align_predictions(__lowercase , __lowercase )
A: int = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowercase , __lowercase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
A: str = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowercase , __lowercase , __lowercase )
return results
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
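

# A usage sketch (added for illustration; the file names are assumptions): with one
# prediction and one reference per line, the Fire wrapper turns the function into a CLI:
#
#     python rouge_cli.py predictions.txt references.txt --save_path=rouge.json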
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'{solution() = }')
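
# A small worked check (added for illustration): the even Fibonacci numbers not
# exceeding 10 are 2 and 8, so solution(10) == 10; with the default limit of
# 4,000,000 the sum is 4613732.
#
#     assert solution(10) == 10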
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
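

# A quick numeric check (added for illustration): the standard normal density at the
# mean is 1 / sqrt(2 * pi) ~= 0.3989.
#
#     >>> round(gaussian(0), 4)
#     0.3989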
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
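    # Invocation sketch (flag names as defined above; script name and paths illustrative):
    #   python convert_unispeech_checkpoint.py \
    #       --checkpoint_path ./unispeech.pt \
    #       --pytorch_dump_folder_path ./unispeech-hf \
    #       --dict_path ./dict.json \
    #       --config_path ./config.json
    # Pass --not_finetuned instead of --dict_path to convert a pretraining-only
    # checkpoint into UniSpeechForPreTraining.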
| 319 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
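    # Worked example: 1406357289 is one of the qualifying 0-9 pandigitals -- d4=6 is
    # even, d3+d4+d5 = 0+6+3 is divisible by 3, d6=5 by 5, and 357, 572, 728, 289 are
    # divisible by 7, 11, 13, 17 respectively.
    #   >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
    #   True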
| 319 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : str="</s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Dict="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> None:
'''simple docstring'''
A: Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
A: Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
A: Tuple = vocab_file
A: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
A: Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
A: int = len(self.sp_model ) - 1
A: List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A: Tuple = [self.cls_token_id]
A: int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A: List[str] = [self.sep_token_id]
A: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.sp_model )
def _snake_case ( self : str ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A: Any = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
return spm_id if spm_id else self.unk_token_id
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
'''simple docstring'''
A: Any = []
A: Dict = ''''''
A: Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
A: List[Any] = True
A: str = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
A: Tuple = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
A: str = self.__dict__.copy()
A: Tuple = None
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A: Optional[Any] = {}
A: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A: str = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
A: Optional[int] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
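# Usage sketch (model name taken from the pretrained maps above; the expected outputs
# follow from the special-token ids <s>=0 and </s>=2 in fairseq_tokens_to_ids):
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   tokenizer.build_inputs_with_special_tokens([5, 6])    # -> [0, 5, 6, 2]
#   tokenizer.build_inputs_with_special_tokens([5], [6])  # -> [0, 5, 2, 2, 6, 2]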
| 319 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
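# A quick illustration of the symbol-pair extraction used by the BPE loop below:
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}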
class LongformerTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]:
'''simple docstring'''
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
A: str = json.load(SCREAMING_SNAKE_CASE_ )
A: str = {v: k for k, v in self.encoder.items()}
A: Union[str, Any] = errors # how to handle errors in decoding
A: Optional[int] = bytes_to_unicode()
A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
A: int = merges_handle.read().split('''\n''' )[1:-1]
A: str = [tuple(merge.split() ) for merge in bpe_merges]
A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Union[str, Any] = {}
A: Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A: str = tuple(SCREAMING_SNAKE_CASE_ )
A: str = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A , A: Optional[Any] = bigram
A: Tuple = []
A: List[Any] = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
A: Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A: int = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
A: Any = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ )
A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ )
A: str = word
return word
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A: Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
A: Tuple = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) )
return bpe_tokens
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ )
A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A: Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A: int = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
A: Any = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
A: Union[str, Any] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A: int = [self.cls_token_id]
A: str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A: Dict = [self.sep_token_id]
A: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
'''simple docstring'''
A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
A: List[Any] = ''' ''' + text
return (text, kwargs)
| 319 | 1 |
'''simple docstring'''
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
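    # Examples:
    #   mode([2, 2, 3])    -> [2]
    #   mode([1, 1, 2, 2]) -> [1, 2]   (all tied modes are returned, sorted)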
| 319 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(num))
    # One copy of the digit list per position; each copy drops a different digit
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
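    # Examples:
    #   remove_digit(152)  -> 52   (candidates after deleting one digit: 52, 12, 15)
    #   remove_digit(-290) -> 90   (the sign is dropped via abs() first)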
| 319 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
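    # Invocation sketch (flag names as defined above; model name and paths illustrative):
    #   python convert_s3prl_checkpoint.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted-model
    # config.architectures[0] selects which converter runs (...ForSequenceClassification,
    # ...ForAudioFrameClassification or ...ForXVector).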
| 319 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]

    maxim = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
# Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
    matrixa = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
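    # Sanity-check sketch (the naive helper is illustrative, not part of this module):
    #   def naive_matmul(a, b):
    #       return [[sum(a[i][k] * b[k][j] for k in range(len(b)))
    #                for j in range(len(b[0]))] for i in range(len(a))]
    #   assert strassen(matrixa, matrixb) == naive_matmul(matrixa, matrixb)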
| 319 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
'''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
    order = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
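# A minimal sampling-loop sketch for the scheduler above. `model` is a hypothetical
# denoiser called as model(x, sigma); shapes and the step count are arbitrary:
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = scheduler.config.sigma_max * torch.randn(1, 3, 64, 64)
#   for i, sigma in enumerate(scheduler.schedule):
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma.item())
#       sigma_prev = scheduler.schedule[i + 1].item() if i + 1 < len(scheduler.schedule) else 0.0
#       out = scheduler.step(model(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
#       sample = out.prev_sample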
| 319 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
if "patch_embed.proj" in name:
A: Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A: Union[str, Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
A: str = '''encoder.''' + name
if "attn.proj" in name:
A: Dict = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A: Optional[int] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A: int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A: int = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A: Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A: Any = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
A: Optional[Any] = '''layernorm.weight'''
if name == "norm.bias":
A: Any = '''layernorm.bias'''
if "head" in name:
A: Any = name.replace('''head''' , '''classifier''' )
else:
A: Optional[Any] = '''swin.''' + name
return name
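# A couple of worked renames, read straight off the rules above:
#   rename_key("patch_embed.proj.weight") -> "swin.embeddings.patch_embeddings.projection.weight"
#   rename_key("head.weight")             -> "classifier.weight"  (the head keeps no "swin." prefix)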
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
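    # Invocation sketch (the default --swin_name is shown above; dump path illustrative):
    #   python convert_swin_checkpoint.py \
    #       --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-hf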
| 319 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
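# The ref file is expected to be JSON lines -- one JSON-encoded list of whole-word-mask
# reference indices per dataset row; the assert above enforces the 1:1 alignment.
# An illustrative two-row file:
#   [2, 3]
#   [1, 4, 5]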
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, we need to keep those columns from being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
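

# Usage sketch (added for illustration, not part of the original script): a typical
# launch of this whole-word-masking MLM script. Flag names follow the argument
# dataclasses parsed above; the script filename, model name, and file paths are
# placeholder assumptions.
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --validation_file path/to/validation.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --do_train \
#       --do_eval \
#       --output_dir ./mlm-wwm-output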
| 319 | 1 |
"""Deterministic Miller-Rabin primality test, with an optional probabilistic fallback."""


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
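

def _witness_example() -> None:
    # Illustrative helper (added here, not part of the original file): shows the
    # d * 2**s decomposition that miller_rabin() performs, for n = 221 = 13 * 17.
    n = 221
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    assert (d, s) == (55, 2)  # 220 == 55 * 2**2
    # The witness prime 2 never hits 1 (at r == 0) or n - 1, so 221 is composite.
    assert pow(2, 55, 221) == 128
    assert pow(2, 110, 221) == 30
    assert miller_rabin(221) is False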
def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite
    and a prime in each range.
    """
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
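
# Usage sketch (added for illustration): below the deterministic bound the test can
# back a simple prime filter, e.g.
#
#   small_primes = [n for n in range(2, 30) if miller_rabin(n)]
#   assert small_primes == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]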
| 319 |
"""Tests for the Wav2Vec2Phoneme CTC tokenizer."""
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
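

# Usage sketch (added for illustration, not part of the test suite): the round trip
# exercised by the tests above. Requires the `phonemizer` backend and network access
# for the pretrained checkpoint.
#
#   tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#   phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
#   ids = tokenizer(phonemes, do_phonemize=False).input_ids
#   assert tokenizer.decode(ids) == phonemes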
| 319 | 1 |