import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check ensures we actually called the fake head request.
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
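# Editor's sketch, not part of the original module: with the lazy structure
# above, importing the package stays cheap; torch/TF only get imported when
# one of the heavy names in `_import_structure` is first accessed (assumes a
# transformers release that exports these symbols).
#
# >>> from transformers import MobileViTConfig   # does not trigger a torch import
# >>> config = MobileViTConfig()                  # pure-Python config object
# >>> config.num_attention_heads                  # default in recent releases
# 4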
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case : Optional[Any] = str(bin(lowercase ) )
binary_number += "0" * shift_amount
return binary_number
def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case : Any = str(bin(lowercase ) )[2:]
if shift_amount >= len(lowercase ):
return "0b0"
snake_case : Optional[Any] = binary_number[: len(lowercase ) - shift_amount]
return "0b" + shifted_binary_number
def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
snake_case : Tuple = "0" + str(bin(lowercase ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case : List[str] = len(bin(lowercase )[3:] ) # Find 2's complement of number
snake_case : Optional[Any] = bin(abs(lowercase ) - (1 << binary_number_length) )[3:]
snake_case : str = (
"1" + "0" * (binary_number_length - len(lowercase )) + binary_number
)
if shift_amount >= len(lowercase ):
return "0b" + binary_number[0] * len(lowercase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(lowercase ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
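# Editor's sketch, not part of the original file: expected behaviour of the
# three helpers above, in doctest form. The outputs follow directly from the
# string manipulation each function performs.
#
# >>> logical_left_shift(17, 2)        # 17 == 0b10001, two zeros appended
# '0b1000100'
# >>> logical_right_shift(1991, 4)     # 1991 == 0b11111000111, last 4 bits dropped
# '0b1111100'
# >>> arithmetic_right_shift(-17, 2)   # sign bit ('1') is replicated on the left
# '0b111011'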
"""simple docstring"""
from typing import Any
def __lowerCAmelCase ( lowercase : list , lowercase : list , lowercase : dict , lowercase : dict , lowercase : dict , ) -> list:
"""simple docstring"""
_validation(
lowercase , lowercase , lowercase , lowercase , lowercase , )
# Creates data structures and fill initial step
snake_case : dict = {}
snake_case : dict = {}
for state in states_space:
snake_case : int = observations_space[0]
snake_case : Any = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case : Union[str, Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowercase ) ):
snake_case : Optional[Any] = observations_space[o]
snake_case : str = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case : str = ""
snake_case : List[Any] = -1
for k_state in states_space:
snake_case : Tuple = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case : Optional[Any] = probability
snake_case : int = k_state
# Update probabilities and pointers dicts
snake_case : List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case : List[Any] = arg_max
# The final observation
snake_case : Dict = observations_space[len(lowercase ) - 1]
# argmax for given final observation
snake_case : str = ""
snake_case : Optional[int] = -1
for k_state in states_space:
snake_case : int = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case : Optional[int] = probability
snake_case : List[Any] = k_state
snake_case : str = arg_max
# Process pointers backwards
snake_case : List[str] = last_state
snake_case : Optional[int] = []
for o in range(len(lowercase ) - 1 , -1 , -1 ):
result.append(lowercase )
snake_case : List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
def __lowerCAmelCase ( lowercase : Any , lowercase : Any , lowercase : Any , lowercase : Any , lowercase : Any , ) -> None:
"""simple docstring"""
_validate_not_empty(
lowercase , lowercase , lowercase , lowercase , lowercase , )
_validate_lists(lowercase , lowercase )
_validate_dicts(
lowercase , lowercase , lowercase )
def __lowerCAmelCase ( lowercase : Any , lowercase : Any , lowercase : Any , lowercase : Any , lowercase : Any , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def __lowerCAmelCase ( lowercase : Any , lowercase : Any ) -> None:
"""simple docstring"""
_validate_list(lowercase , "observations_space" )
_validate_list(lowercase , "states_space" )
def __lowerCAmelCase ( lowercase : Any , lowercase : str ) -> None:
"""simple docstring"""
if not isinstance(_object , lowercase ):
snake_case : List[str] = F'{var_name} must be a list'
raise ValueError(lowercase )
else:
for x in _object:
if not isinstance(lowercase , lowercase ):
snake_case : Tuple = F'{var_name} must be a list of strings'
raise ValueError(lowercase )
def __lowerCAmelCase ( lowercase : Any , lowercase : Any , lowercase : Any , ) -> None:
"""simple docstring"""
_validate_dict(lowercase , "initial_probabilities" , lowercase )
_validate_nested_dict(lowercase , "transition_probabilities" )
_validate_nested_dict(lowercase , "emission_probabilities" )
def __lowerCAmelCase ( lowercase : Any , lowercase : str ) -> None:
"""simple docstring"""
_validate_dict(_object , lowercase , lowercase )
for x in _object.values():
_validate_dict(lowercase , lowercase , lowercase , lowercase )
def __lowerCAmelCase ( lowercase : Any , lowercase : str , lowercase : type , lowercase : bool = False ) -> None:
"""simple docstring"""
if not isinstance(_object , lowercase ):
snake_case : int = F'{var_name} must be a dict'
raise ValueError(lowercase )
if not all(isinstance(lowercase , lowercase ) for x in _object ):
snake_case : Optional[Any] = F'{var_name} all keys must be strings'
raise ValueError(lowercase )
if not all(isinstance(lowercase , lowercase ) for x in _object.values() ):
snake_case : Optional[int] = "nested dictionary " if nested else ""
snake_case : int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(lowercase )
if __name__ == "__main__":
from doctest import testmod
testmod()
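# Editor's sketch, not part of the original file: the classic Healthy/Fever
# hidden Markov model, recovered by the function above.
#
# >>> observations = ["normal", "cold", "dizzy"]
# >>> states = ["Healthy", "Fever"]
# >>> initial = {"Healthy": 0.6, "Fever": 0.4}
# >>> transition = {
# ...     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
# ...     "Fever": {"Healthy": 0.4, "Fever": 0.6},
# ... }
# >>> emission = {
# ...     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
# ...     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
# ... }
# >>> viterbi(observations, states, initial, transition, emission)
# ['Healthy', 'Healthy', 'Fever']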
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX Japanese, based on the Japanese special sub-word encoding from
    https://github.com/tanreinama/Japanese-BPEEncoder_V2.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)

        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Sub-word tokenizer ported from https://github.com/tanreinama/Japanese-BPEEncoder_V2 (MIT licence).
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case="\n" ) -> str:
'''simple docstring'''
__a = []
__a = []
__a = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode('''utf-8''' , errors='''replace''' ) )
__a = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(lowerCAmelCase_ )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode('''utf-8''' , errors='''replace''' ) )
__a = ''''''.join(lowerCAmelCase_ )
return text | 219 |
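# Editor's sketch, not part of the original file: typical end-user access to
# this tokenizer goes through AutoTokenizer (requires network access to the
# Hugging Face Hub to fetch vocab.txt and emoji.json):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("吾輩は猫である。")["input_ids"]
#     print(tokenizer.decode(ids))  # round-trips back to the input text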
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method, given
    sample points (x_points[i], y_points[i]). Returns the interpolated value
    and the full table of partial results.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
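# Editor's sketch, not part of the original file: the sample points below all
# lie on the line y = x + 5, so evaluating the interpolant at x0 = 5 gives 10.
#
# >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
# 10.0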
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# We will verify our results on an image from the COCO dataset
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the TensorFlow checkpoint's weights into our MobileNetV1 structure.
    """
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__magic_name__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__magic_name__ : Union[str, Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
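# Editor's sketch, not part of the original script: a programmatic call with
# placeholder paths (the .ckpt path and the output directory are illustrative):
#
#     convert_movilevit_checkpoint(
#         model_name="mobilenet_v1_1.0_224",
#         checkpoint_path="./mobilenet_v1_1.0_224.ckpt",
#         pytorch_dump_folder_path="./mobilenet_v1_1.0_224_pt",
#         push_to_hub=False,
#     )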
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Any = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
UpperCAmelCase__: Tuple = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase__: str = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase__: Optional[Any] = model.generate(lowerCamelCase__ , max_length=2_0_0 , do_sample=lowerCamelCase__ )
        self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
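# Editor's sketch, not part of the original file: a hypothetical counterpart
# server the client above could connect to. It listens on the same port and
# streams a local file (the filename is a placeholder) to the first client.
# It relies on the module-level `import socket` above.
def serve_file(filename: str = "mytext.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(5)
    conn, addr = server.accept()
    print(f"Got connection from {addr}")
    print(conn.recv(1024))  # the client's b"Hello server!" greeting
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    server.close()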
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
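# Editor's sketch, not part of the original file: a quick shape check of the
# head above (class and embedding sizes are arbitrary examples).
#
# >>> import torch
# >>> head = ClassificationHead(class_size=5, embed_size=768)
# >>> head(torch.randn(4, 768)).shape   # batch of 4 pooled encoder states
# torch.Size([4, 5])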
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """
    Construct a BigBird tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> str:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('use_source_tokenizer' , lowerCamelCase_ )
lowerCamelCase_ = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ = []
lowerCamelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
lowerCamelCase_ = []
sub_texts.append(lowerCamelCase_ )
else:
current_sub_text.append(lowerCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCamelCase_ = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(lowerCamelCase_ ) )
else:
lowerCamelCase_ = ''.join(lowerCamelCase_ )
lowerCamelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ = self.clean_up_tokenization(lowerCamelCase_ )
return clean_text
else:
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , 'wb' ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
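
# --- Hedged usage sketch (added; not part of the original module) -------------
# Assuming a trained SentencePiece model file (hypothetical path "spiece.model"),
# the helpers above compose sequence pairs as [CLS] A [SEP] B [SEP]:
#
#     tokenizer = TokenizerClass("spiece.model")  # TokenizerClass = the class defined above
#     tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#     # -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
#     tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#     # -> [0, 0, 0, 0, 1, 1, 1]
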
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
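
# --- Hedged usage sketch (added; not part of the original module) -------------
# A typical warmup + polynomial-decay AdamW setup for 10,000 training steps with
# 1,000 warmup steps and decoupled weight decay (`model` is any Keras model):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")
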
class AdamWeightDecay(Adam):
    """Adam with decoupled (fixed) weight decay, optionally filtered by parameter-name patterns."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Gradient accumulation utility: call it with per-step gradients, read them
    back via `.gradients`, and zero them with `.reset()`."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )

        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
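
# --- Hedged usage sketch (added; not part of the original module) -------------
# Accumulate gradients over `accum_steps` micro-batches before applying them once
# (`dataset`, `model`, `loss_fn`, `optimizer` and `accum_steps` are assumptions):
#
#     accumulator = GradientAccumulator()
#     for step, (x, y) in enumerate(dataset):
#         with tf.GradientTape() as tape:
#             loss = loss_fn(y, model(x, training=True))
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if (step + 1) % accum_steps == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()
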
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "spiece.model"}
__UpperCamelCase = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
__UpperCamelCase = "▁"
class lowerCAmelCase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__=100 , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE = [F'<extra_id_{i}>' for i in range(_lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
SCREAMING_SNAKE_CASE = len(set(filter(lambda lowerCAmelCase__ : bool('extra_id' in str(_lowercase ) ) , _lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
SCREAMING_SNAKE_CASE = legacy
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = extra_ids
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
@staticmethod
def __A ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
SCREAMING_SNAKE_CASE = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _lowercase , )
return max_model_length
@property
def __A ( self ) -> Dict:
return self.sp_model.get_piece_size() + self._extra_ids
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowercase )) + [1]
return ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def __A ( self ) -> str:
return list(
set(filter(lambda lowerCAmelCase__ : bool(re.search(r'<extra_id_\d+>' , _lowercase ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ) -> Union[str, Any]:
return [self._convert_token_to_id(_lowercase ) for token in self.get_sentinel_tokens()]
def __A ( self , lowerCAmelCase__ ) -> List[int]:
if len(_lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = self._add_eos_if_not_present(_lowercase )
if token_ids_a is None:
return token_ids_a
else:
SCREAMING_SNAKE_CASE = self._add_eos_if_not_present(_lowercase )
return token_ids_a + token_ids_a
def __getstate__( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
SCREAMING_SNAKE_CASE = SPIECE_UNDERLINE + text.replace(_lowercase , ' ' )
return super().tokenize(_lowercase , **_lowercase )
def __A ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
if not self.legacy:
SCREAMING_SNAKE_CASE = text.startswith(_lowercase )
if is_first:
SCREAMING_SNAKE_CASE = text[1:]
SCREAMING_SNAKE_CASE = self.sp_model.encode(_lowercase , out_type=_lowercase )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(_lowercase ):
SCREAMING_SNAKE_CASE = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __A ( self , lowerCAmelCase__ ) -> int:
if token.startswith('<extra_id_' ):
SCREAMING_SNAKE_CASE = re.match(r'<extra_id_(\d+)>' , _lowercase )
SCREAMING_SNAKE_CASE = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_lowercase )
def __A ( self , lowerCAmelCase__ ) -> int:
if index < self.sp_model.get_piece_size():
SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(_lowercase )
else:
SCREAMING_SNAKE_CASE = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __A ( self , lowerCAmelCase__ ) -> str:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowercase ) + token
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(_lowercase )
SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(_lowercase )
return out_string.strip()
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
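
# Worked example (added): 25 = 0b11001 and 32 = 0b100000; padded to the same width
# these are 011001 and 100000, so binary_or(25, 32) == "0b111001" (i.e. 57 == 25 | 32).
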
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
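
# Illustration (added, with hypothetical patterns): a trailing ".*" matches by prefix,
# so should_ignore("decoder.model.1.lstm", ["decoder.*"]) is True, while a plain key
# matches by substring, e.g. should_ignore("encoder.layers.0.conv", ["quantizer"]) is False.
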
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # note: upstream wrote `model_name == "encodec_24khz" or "encodec_32khz"`, which is
    # always truthy; the membership test below is the intended check
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowercase : Optional[int] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() )
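
# Example (added): reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors",
# since "Hey" has four or fewer characters and is kept while the longer words are reversed.
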
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
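# Hypothetical sketch of the idea behind `_LazyModule` above: heavy submodule
# imports can be deferred until first attribute access via PEP 562's
# module-level __getattr__. This illustrates the pattern only; it is not the
# transformers implementation.
#
#   def __getattr__(name):
#       import importlib
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module("." + submodule, __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")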
| 459 | 1 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    '''simple docstring'''
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    '''simple docstring'''
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
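# Worked example (illustrative) of the layout built by pre_process_datasets:
# input_ids has shape (n_batch, 2, input_len); for each example, row 0 holds
# [start] story [delim] cont1 [clf] and row 1 the cont2 variant. mc_token_ids
# stores the position of each [clf] token (len(with_cont) - 1), which is where
# the multiple-choice head reads its hidden state, and mc_labels holds the
# index (0 or 1) of the correct continuation.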
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info('  %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
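# Side note (illustrative, not part of the original script): the running loss in
# the progress bar above is an exponential moving average,
# exp_avg = 0.7 * exp_avg + 0.3 * loss, so recent batches dominate the display.
def _ema_sketch(losses, decay=0.7):
    ema = None
    for x in losses:
        ema = x if ema is None else decay * ema + (1 - decay) * x
    return ema


# _ema_sketch([4.0, 2.0, 2.0]) ~ 2.98, versus a plain mean of about 2.67.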
| 708 | """simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
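# Illustrative usage of the helpers above (toy numbers, not from the original
# file): boxes are (x0, y0, x1, y1); _scale_box maps them back to raw-image
# coordinates and _clip_box clamps them into the image rectangle in place.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    scales_yx = torch.tensor([[0.5, 2.0]])  # (scale_y, scale_x) for one image
    boxes = _scale_box(boxes, scales_yx)  # x coords * 2.0, y coords * 0.5
    _clip_box(boxes, (100, 150))  # clamp into a 100 (h) x 150 (w) image
    print(boxes)  # tensor([[ 20.,  10., 150., 100.]])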
| 192 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
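# Worked example (illustrative) of the sequence length used by the tester above:
# ViT splits the image into (image_size // patch_size) ** 2 patches and prepends
# one [CLS] token. For google/vit-base-patch16-224: (224 // 16) ** 2 + 1 == 197.
assert (224 // 16) ** 2 + 1 == 197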
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """simple docstring"""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
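# Illustrative usage (an assumption, not part of the original file). Note the
# routine relies on TF1-era APIs (tf.Session, tf.placeholder, tf.sub), so it
# needs a very old TensorFlow or the tf.compat.v1 equivalents.
if __name__ == "__main__":
    points = array([[1.0, 1.0], [1.2, 1.1], [8.0, 8.0], [8.2, 7.9]])
    centroids, assignments = TFKMeansCluster(points, 2)
    print(centroids)  # one centroid near (1.1, 1.05), one near (8.1, 7.95)
    print(assignments)  # e.g. [0, 0, 1, 1]; cluster ids depend on random init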
| 721 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 | 0 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
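# Worked example (illustrative): resistors of 4, 8 and 8 ohms. In series the
# values add: 4 + 8 + 8 = 20.0 ohms. In parallel the reciprocals add:
# 1 / (1/4 + 1/8 + 1/8) = 2.0 ohms, always below the smallest resistor.
assert resistor_series([4, 8, 8]) == 20.0
assert resistor_parallel([4, 8, 8]) == 2.0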
| 645 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    """simple docstring"""

    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
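# Illustrative usage of the auto classes above (standard transformers API): at
# load time, from_pretrained resolves the checkpoint's config type through the
# mapping to the matching Flax model class.
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel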
| 645 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = "▁"
lowercase__ = {"vocab_file": "spiece.model"}
lowercase__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
lowercase__ = {
"google/pegasus-xsum": 512,
}
lowercase__ = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list)}, but is"""
                    f""" {type(additional_special_tokens)}"""
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."""
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
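# Worked example (illustrative) of the id scheme above with the default
# offset=103: ids 0 and 1 are pad/eos, 2 and 3 are <mask_1>/<mask_2>, and the
# remaining <unk_2>..<unk_102> fill ids up to 104; every raw SentencePiece piece
# id is then shifted up by the offset, so sp_model piece 5 maps to tokenizer
# id 5 + 103 = 108, and vocab_size is len(sp_model) + 103.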
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    '''simple docstring'''

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
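# A hypothetical sketch of the `calculate_bleu` helper imported from utils above
# (the examples' utils wrap sacrebleu; the exact name and signature here are
# assumptions, not the verified implementation):
#
#   import sacrebleu
#
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}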
| 695 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
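# Added note: with X applied to both qubits, |00> flips to |11>, so a
# 1000-shot run is expected to return counts of {'11': 1000}.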
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 397 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
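# Worked check (added): binomial_distribution(2, 4, 0.75)
# = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375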
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
| 397 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, so layoutlmv2 should fail and
        # return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 705 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self) -> bool:
        return self.head == self.tail
    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self) -> int:
        return self.tail - self.head
    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self) -> Any:
        return self.data
    def get_left(self) -> MyNode | None:
        return self.left
    def get_right(self) -> MyNode | None:
        return self.right
    def get_height(self) -> int:
        return self.height
    def set_data(self, data: Any) -> None:
        self.data = data
    def set_left(self, node: MyNode | None) -> None:
        self.left = node
    def set_right(self, node: MyNode | None) -> None:
        self.right = node
    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node):
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b):
    if a > b:
        return a
    return b
def left_rotation(node):
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def right_rotation(node):
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node):
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node):
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
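# Added note: insert_node and del_node below restore the AVL invariant
# |height(left) - height(right)| <= 1 with one of four fixes -- a single
# rotation for the "outer" (LL/RR) cases and lr_rotation / rl_rotation
# (double rotations) for the "inner" (LR/RL) cases.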
def insert_node(node, data):
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None
    def get_height(self) -> int:
        return get_height(self.root)
    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)
    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
        print(str(t))
| 193 | 0 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
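# Quick sanity check (added): is_prime(2) -> True, is_prime(9) -> False,
# is_prime(97) -> True.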
def sieve_er(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list[int]:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number_1: int, number_2: int) -> int:
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
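# Example (added): gcd(48, 18) iterates (48, 18) -> (18, 12) -> (12, 6)
# -> (6, 0) and returns 6.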
def kg_v(number_1: int, number_2: int) -> int:
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list[int]:
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
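# Example (added): simplify_fraction(10, 20) computes gcd(10, 20) == 10 and
# returns (1, 2).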
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)
    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 8 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a_ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 716 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 193 | 0 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised arithmetic expression with Dijkstra's
    two-stack algorithm.

    >>> dijkstras_two_stack_algorithm("(5 + 3)")
    8
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 105 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # Drop the duplicate initializer and point its users at the reference one.
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(model_file_path):
    model_file_folder = os.path.dirname(model_file_path)
    model_file_name = os.path.basename(model_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return optimized_model_file_name
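# Hedged usage sketch (added; the path below is a placeholder, not from the
# original script):
#
#     optimized_name = remove_dup_initializers("path/to/model.onnx")
#     print("wrote", optimized_name)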
| 105 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
snake_case = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
snake_case = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
snake_case = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
snake_case = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
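# Example (added): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"],
# which lets get_frameworks_table drop the trailing word of a class name.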
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"""`utils/update_metadata.py`: {msg}. Please add them!""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 705 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
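# Typical invocation (added; the model name and flags are illustrative
# TensorFlowBenchmarkArguments fields, not taken from this file):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128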
| 488 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowercase__ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase__ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
lowercase__ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
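# Hand-checked doctest-style examples for the two helpers above (added for
# illustration, not part of the original metric):
#   >>> compute_exact("The cat sat.", "the cat sat")
#   1
#   >>> compute_em(predictions=["the cat sat"], references=[["The cat sat.", "A dog"]])
#   100.0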
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
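# Worked example for SARIngram (hand-checked, added for illustration): when the
# prediction keeps exactly the reference n-grams, every sub-score hits its
# 0/0=1 or F1=1 ceiling:
#   >>> SARIngram("a b".split(), "a b".split(), ["a b".split()], 1)
#   (1.0, 1, 1.0)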
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
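# Per-sentence sanity check (illustrative; mirrors the module docstring's example,
# 21.81 on the 0-100 scale, i.e. roughly 0.218 here before compute_sari scales it):
#   SARIsent("About 95 species are currently accepted .",
#            "About 95 you now get in .",
#            ["About 95 species are currently known ."])  # ~0.218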
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
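# Rough example of the normalizer above (comment only; exact output depends on
# sacrebleu's "13a" tokenizer, which splits punctuation from words):
#   normalize("Hello, World!")  # -> "hello , world !" (approximately)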
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def __magic_name__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int]="exp" , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Optional[int]=False , ):
__a : str = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__a : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
__a : str = sacrebleu.corpus_bleu(
_lowerCamelCase , _lowerCamelCase , smooth_method=_lowerCamelCase , smooth_value=_lowerCamelCase , force=_lowerCamelCase , lowercase=_lowerCamelCase , use_effective_order=_lowerCamelCase , )
return output.score
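# Example (mirrors the module docstring; one prediction with one reference):
#   round(compute_sacrebleu(predictions=["About 95 you now get in ."],
#                           references=[["About 95 species are currently known ."]]), 2)
#   # -> 14.54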
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 581 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score an item by counting the characters that sit in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
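# Hand-checked example: three of five characters line up with the target.
#   >>> evaluate("abcde", "axcxe")
#   ('abcde', 3.0)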
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
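# Illustrative example (the slice point is random, so outputs vary):
#   crossover("aaaa", "bbbb") returns ("aabb", "bbaa") when the slice index is 2.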
def mutate(child: str, genes: list[str]) -> str:
    # Mutate a random gene of a child with another one from the list.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #        max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 581 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 198 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 198 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224,
        num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            emb = model.get_output_embeddings()
            self.assertTrue(emb is None or isinstance(emb, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 518 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["""up"""] )
def UpperCAmelCase ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def UpperCAmelCase ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def UpperCAmelCase ( self ):
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def UpperCAmelCase ( self ):
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_SCREAMING_SNAKE_CASE )] for number in range(10 )] )
def UpperCAmelCase ( self ):
_UpperCAmelCase = int(chr(self.current_selection ) )
_UpperCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _SCREAMING_SNAKE_CASE )
else:
return
else:
return
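    # Illustrative usage of this menu (hypothetical prompt and choices, not part
    # of the original module):
    #   BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"]).run(default_choice=0)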
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
return choice | 518 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
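# Hand-checked examples: bisect_left returns the first slot for duplicates,
# bisect_right the slot just past them.
#   >>> bisect_left([0, 5, 5, 10], 5)
#   1
#   >>> bisect_right([0, 5, 5, 10], 5)
#   3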
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
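# Hand-checked examples for the iterative search above:
#   >>> binary_search([0, 5, 7, 10, 15], 7)
#   2
#   >>> binary_search([0, 5, 7, 10, 15], 6) is None
#   True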
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
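# Hand-checked example (this variant takes explicit inclusive bounds):
#   >>> binary_search_by_recursion([0, 5, 7, 10, 15], 10, 0, 4)
#   3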
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 609 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 609 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy together with the (binary) F1 score."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation coefficients for regression tasks (STS-B)."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
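# Quick sanity checks for the helpers above (illustrative, not from the original file;
# `simple_accuracy` expects array-like inputs supporting `==` and `.mean()`):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))  -> 0.666...
#   pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])     -> {"pearson": 1.0, "spearmanr": 1.0}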
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 207 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map an original EfficientFormer state-dict key to its transformers equivalent."""
    new_name = old_name

    if "patch_embed" in old_name:
        hidden_output, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")

    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the original checkpoint in place."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
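# Example invocation (a sketch: the script name and all paths are placeholders,
# not taken from the original file):
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --push_to_hub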
| 376 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
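# Example invocation (a sketch; the prompt and paths are placeholders):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200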
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 664 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of the scheduler's step: the previous sample and the predicted x_0."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
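# For example, `betas_for_alpha_bar(4)` returns a length-4 float32 tensor whose entries
# follow the squared-cosine `alpha_bar_fn` above, each clipped at `max_beta=0.999`.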
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverted DDIM scheduler: runs the DDIM update in the noise-adding direction."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # DDIM needs no input scaling; kept for API compatibility with other schedulers.
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self ):
return self.config.num_train_timesteps | 664 | 1 |
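# Illustrative DDIM-inversion loop using the scheduler above (a sketch, not part of
# the original file: `unet` and `latents` are assumed to come from the caller):
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample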
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_super_res_kwargs(self):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def a ( self ):
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
snake_case_ = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = pipe(**snake_case )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = pipe(
**snake_case , return_dict=snake_case , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
snake_case_ = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = pipe(**snake_case )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = pipe(
**snake_case , return_dict=snake_case , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
snake_case_ = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
snake_case_ = pipe(**snake_case )
snake_case_ = output.images
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
snake_case_ = pipe(
**snake_case , return_dict=snake_case , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
snake_case_ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
snake_case_ = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**snake_case )
snake_case_ = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = torch.Generator(device=snake_case ).manual_seed(0 )
snake_case_ = pipe.decoder.dtype
snake_case_ = 1
snake_case_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case_ = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
snake_case_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case_ = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
snake_case_ = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case ).images
snake_case_ = self.get_dummy_inputs(snake_case , pil_image=snake_case )
# Don't pass image, instead pass embedding
snake_case_ = pipeline_inputs.pop('image' )
snake_case_ = pipe.image_encoder(snake_case ).image_embeds
snake_case_ = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case , image_embeddings=snake_case , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def a ( self ):
snake_case_ = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case_ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case , expected_max_diff=snake_case )
@skip_mps
def a ( self ):
snake_case_ = torch_device == 'cpu'
snake_case_ = True
snake_case_ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=snake_case , relax_max_difference=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
def a ( self ):
snake_case_ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case_ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=snake_case )
@skip_mps
def a ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self ):
return super().test_save_load_local()
@skip_mps
def a ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
snake_case_ = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case_ = pipeline(
snake_case , generator=snake_case , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(snake_case , snake_case , 15 )
| 362 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the config file
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
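# Example invocation (a sketch; paths are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin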
| 362 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
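# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]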
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha) | 178 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
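# Note: `mapping` above pairs keys purely by the iteration order of the two state
# dicts, so it assumes both enumerate parameters in the same order; that holds for
# these specific checkpoints but is not guaranteed for state dicts in general.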
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 178 | 1 |
def longest_distance(graph):
    """Print the length (in vertices) of the longest path in a DAG, via Kahn's topological order."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
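# For the adjacency list above the longest chain is e.g. 0 -> 2 -> 5 -> 6 -> 7,
# which visits 5 vertices, so the call prints 5 (checked by hand against the code).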
| 23 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=__a )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_a = 77
_a = self.dummy_image.to(__a )
# put models in fp16
_a = unet.half()
_a = vae.half()
_a = bert.half()
# make sure here that pndm scheduler skips prk
_a = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
_a = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
_a = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe(
[prompt] , generator=__a , num_inference_steps=2 , output_type="np" , image=__a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_a = init_image.resize((7_60, 5_04) )
_a = "BAAI/AltDiffusion"
_a = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = "A fantasy landscape, trending on artstation"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="np" , )
_a = output.images[0]
_a = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_a = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 692 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
lowerCAmelCase__ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCAmelCase__ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCAmelCase__ = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
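# Worked example of the WER formula from the docstring above (hand-counted; a sketch
# that assumes the jiwer alignment shown, not an official test of the metric):
#   "this is the reference" -> "this is the prediction":  S=1, D=0, I=0, N=4
#   "there is another one"  -> "there is an other sample": S=2, D=0, I=1, N=4
# Corpus WER = (S + D + I) / N = (1 + 2 + 1) / (4 + 4) = 0.5, matching the docstring.
assert (1 + 2 + 1) / (4 + 4) == 0.5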
| 700 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 681 | 0 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
    to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed
    precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
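# Illustrative use of the wrapper (a sketch, assuming a configured Accelerator and a
# multi-process run; not executable standalone): with split_batches=False the inner
# scheduler is stepped num_processes times per optimizer step, so a schedule written
# for the per-device batch size still finishes on time when the effective batch size
# is multiplied by the number of processes.
#
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000)
#   scheduler = AcceleratedScheduler(scheduler, optimizer, split_batches=False)
#   ...
#   scheduler.step()  # advances StepLR by AcceleratorState().num_processes steps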
| 388 |
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # helper: validate the bounds and return one of them
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
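# The loop in guess_the_number is plain bisection; a compact equivalent that returns
# the probe sequence instead of printing it (`probe_sequence` is an illustrative
# helper, not part of the original):
def probe_sequence(lower: int, higher: int, to_guess: int) -> list[int]:
    probes = []
    while True:
        number = get_avg(lower, higher)
        probes.append(number)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return probes


assert probe_sequence(0, 1000, 42) == [500, 250, 125, 62, 31, 46, 38, 42]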
| 73 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__magic_name__ : Dict = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__magic_name__ : Tuple = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__magic_name__ : Optional[int] = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 715 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index marks the starting position of a substring
        # that is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
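# Quick check of find_pattern (a sketch): z-values >= len(pattern) in the concatenated
# string mark exact occurrences, and "abra" occurs twice in "abracadabra".
assert find_pattern("abra", "abracadabra") == 2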
| 368 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """
        Tests whether this constraint has been properly defined.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
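# Walk-through of the constraint protocol above (illustrative token ids): a
# PhrasalConstraint is advanced only by its tokens in order, and update() reports
# (stepped, completed, reset) at each step.
_phrase = PhrasalConstraint([5, 6, 7])
assert _phrase.advance() == 5                      # next token that makes progress
assert _phrase.update(5) == (True, False, False)   # stepped, not yet completed
assert _phrase.update(6) == (True, False, False)
assert _phrase.update(7) == (True, True, False)    # phrase fulfilled
assert _phrase.remaining() == 0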
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """A helper class that builds a trie with the words represented in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of words. Otherwise some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
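# Quick check of the disjunctive case (illustrative ids): either branch of the trie
# fulfills the constraint, and completion is reached at a trie leaf.
_either = DisjunctiveConstraint([[1, 2], [1, 3, 4]])
assert sorted(_either.advance()) == [1]
_either.update(1)
assert sorted(_either.advance()) == [2, 3]
assert _either.update(2) == (True, True, False)    # [1, 2] reached a leaf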
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = constraints
# max # of steps required to fulfill a given constraint
__a : List[str] = max([c.seqlen for c in constraints] )
__a : Optional[int] = len(_lowercase )
__a : int = False
self.init_state()
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = []
__a : str = None
__a : Any = [constraint.copy(stateful=_lowercase ) for constraint in self.constraints]
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__a : str = constraint.advance()
if isinstance(_lowercase , _lowercase ):
token_list.append(_lowercase )
elif isinstance(_lowercase , _lowercase ):
token_list.extend(_lowercase )
else:
__a : Optional[Any] = self.inprogress_constraint.advance()
if isinstance(_lowercase , _lowercase ):
token_list.append(_lowercase )
elif isinstance(_lowercase , _lowercase ):
token_list.extend(_lowercase )
if len(_lowercase ) == 0:
return None
else:
return token_list
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__a , __a : Optional[Any] = self.add(_lowercase )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
__a , __a : Union[str, Any] = False, False
if self.completed:
__a : Optional[Any] = True
__a : str = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__a , __a , __a : Any = self.inprogress_constraint.update(_lowercase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowercase ) )
__a : Optional[Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__a : Dict = None
if len(self.pending_constraints ) == 0:
# we're done!
__a : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowercase ):
__a , __a , __a : Optional[int] = pending_constraint.update(_lowercase )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(_lowercase )
__a : Any = None
if not complete and stepped:
__a : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__a : Tuple = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__a : Optional[Any] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCAmelCase__(self , _lowercase=True ):
'''simple docstring'''
__a : int = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__a : List[str] = [
constraint.copy(stateful=_lowercase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__a : List[str] = self.inprogress_constraint.copy(stateful=_lowercase )
__a : Dict = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 581 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase__ = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
lowercase__ = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
lowercase__ = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(self , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<pad>" , _lowercase="<unk>" , _lowercase="m2m100" , _lowercase = None , _lowercase=8 , **_lowercase , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__a : List[str] = language_codes
__a : str = FAIRSEQ_LANGUAGE_CODES[language_codes]
__a : Optional[int] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
__a : Optional[int] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(_lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_lowercase , tgt_lang=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , language_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_lowercase , **_lowercase , )
__a : Optional[Any] = vocab_file
__a : List[Any] = load_json(_lowercase )
__a : List[str] = {v: k for k, v in self.encoder.items()}
__a : List[Any] = spm_file
__a : int = load_spm(_lowercase , self.sp_model_kwargs )
__a : Dict = len(self.encoder )
__a : Optional[int] = {
self.get_lang_token(_lowercase ): self.encoder_size + i for i, lang_code in enumerate(_lowercase )
}
__a : Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_lowercase )}
__a : Tuple = {v: k for k, v in self.lang_token_to_id.items()}
__a : List[str] = src_lang if src_lang is not None else """en"""
__a : List[Any] = tgt_lang
__a : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__a : Optional[Any] = num_madeup_words
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_lowercase , self.encoder[self.unk_token] )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_lowercase , self.unk_token )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Any = []
__a : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowercase ) + token
__a : Optional[Any] = []
else:
current_sub_tokens.append(_lowercase )
out_string += self.sp_model.decode(_lowercase )
return out_string.strip()
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
__a : str = [1] * len(self.prefix_tokens )
__a : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowercase )) + suffix_ones
return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones
def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
'''simple docstring'''
__a : Optional[Any] = self.__dict__.copy()
__a : List[str] = None
return state
def __setstate__(self , _lowercase ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a : List[str] = {}
__a : Any = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
'''simple docstring'''
__a : Tuple = Path(_lowercase )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
__a : Union[str, Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__a : List[Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , _lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowercase )
elif not os.path.isfile(self.spm_file ):
with open(_lowercase , """wb""" ) as fi:
__a : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (str(_lowercase ), str(_lowercase ))
def lowerCAmelCase__(self , _lowercase , _lowercase = "en" , _lowercase = None , _lowercase = "ro" , **_lowercase , ):
'''simple docstring'''
__a : Dict = src_lang
__a : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__a : Dict = src_lang
__a : List[str] = self(_lowercase , add_special_tokens=_lowercase , **_lowercase )
__a : Union[str, Any] = self.get_lang_id(_lowercase )
__a : Tuple = tgt_lang_id
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : str = self.get_lang_token(_lowercase )
__a : Union[str, Any] = self.lang_token_to_id[lang_token]
__a : Optional[Any] = [self.cur_lang_id]
__a : List[str] = [self.eos_token_id]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Dict = self.get_lang_token(_lowercase )
__a : Union[str, Any] = self.lang_token_to_id[lang_token]
__a : Tuple = [self.cur_lang_id]
__a : Dict = [self.eos_token_id]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Union[str, Any] = self.get_lang_token(_lowercase )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
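# Typical use of the tokenizer above (a sketch; requires the pretrained files and is
# not run here):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   tokenizer.get_lang_id("fr")  # id of the __fr__ language-code token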
| 581 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
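# Minimal sanity check of the helpers above (illustrative values, not part of the
# processor): a dark pixel snaps to the dark cluster and a bright one to the bright
# cluster.
_demo_clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
_demo_pixels = np.array([[[10, 20, 30], [250, 240, 230]]], dtype=np.float32)
assert color_quantize(_demo_pixels, _demo_clusters).tolist() == [0, 1]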
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Keep the color palette as an array so it can be used for quantization later.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Normalize pixel values from [0, 255] to the range [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready `input_ids`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
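    # Illustrative usage sketch (hypothetical names: the enclosing class and the
    # `palette` array are assumptions, not part of this excerpt):
    #
    #     processor = ThisImageProcessor(clusters=palette)  # palette: (n_clusters, 3) RGB array
    #     encoding = processor(images=image, return_tensors="pt")
    #     encoding["input_ids"]  # shape (batch_size, height * width) of cluster indices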
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
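# Example invocation (placeholder paths; the script filename is an assumption):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/output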
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused qkv projection into separate query/key/value tensors
            # (target key names follow the HF Swin attention layout).
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
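# Example invocation (placeholder paths; the script filename is an assumption):
#
#     python convert_swin_simmim_to_pytorch.py \
#         --model_name swin-base-simmim-window6-192 \
#         --checkpoint_path /path/to/simmim_pretrain.pth \
#         --pytorch_dump_folder_path ./swin-simmim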
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
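# Minimal usage sketch (the two-line generator is illustrative):
#
#     def gen():
#         yield {"text": "hello"}
#         yield {"text": "world"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()                        # map-style Dataset
#     ds_iter = GeneratorDatasetInputStream(generator=gen, streaming=True).read()   # IterableDataset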
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the Transformers design.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
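# Example invocation (output folder is a placeholder; the script filename is an
# assumption):
#
#     python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#         bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn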
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
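# Because the module object is replaced by `_LazyModule` at import time, importing
# this package is cheap: the torch-backed classes listed in `_import_structure`
# are only materialized on first attribute access (e.g. `SpeechT5ForTextToSpeech`).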
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
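# Example invocation (placeholder paths; the script filename is an assumption):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/mobilebert/ckpt \
#         --mobilebert_config_file /path/to/config.json \
#         --pytorch_dump_path ./pytorch_model.bin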
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
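# Usage sketch (fetches the published tokenizer files from the Hub):
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     tokenizer("Hello world", "Second segment")  # pairs become [CLS] A [SEP] B [SEP]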
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (k has no bias in the original model, hence the zeros)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original LAVIS weights into the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_model_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model_name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
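# Example invocation (requires the patched LAVIS install mentioned above; the
# script filename is an assumption):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b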
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is
    sqrt(bulk_modulus / density).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
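# Worked example: for water, density ≈ 998 kg/m³ and bulk modulus ≈ 2.15e9 Pa,
# so speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) returns
# sqrt(2.15e9 / 998) ≈ 1468 m/s, close to the measured value for water.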
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase : List[str] = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(UpperCamelCase)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
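# Usage sketch: a RAG config is composed from two sub-configs (the model ids are
# illustrative):
#
#     from transformers import AutoConfig
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)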
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
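# Example invocation (dump path is a placeholder; the script filename is an
# assumption):
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#         --dump_path ./fast-tokenizers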
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts enumerate parameters in the same order, so copy by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
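# Example invocation (converts every LeViT variant when --model_name is omitted;
# the script filename is an assumption):
#
#     python convert_levit_timm_to_pytorch.py \
#         --pytorch_dump_folder_path ./levit-dump-folder --no-push_to_hub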
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
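# Worked example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest right/down
# path is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum(...) returns 7. Note that the
# function accumulates costs in `matrix` itself, mutating it in place.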
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
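# Both checks are decorated with @slow: in the transformers test suite they are
# skipped unless the RUN_SLOW=1 environment variable is set, e.g.
#
#     RUN_SLOW=1 pytest tests/models/xlm_roberta  # the test path is indicative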
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate word first, down to length 2.
            max_try = min(end - start, max_word_len)
            for i in range(max_try, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file to process, same as the training data for the LM",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for the LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for the BERT tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save the result")
    args = parser.parse_args()
    main(args)
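# Example invocation (hypothetical script name and file layout; an LTP model
# and a Chinese BERT checkpoint must exist at the given paths):
#   python prepare_chinese_ref.py --file_name=./data/zh.txt --ltp=./resources/ltp \
#       --bert=./resources/robert --save_path=./resources/ref.txt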
| 357 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the Gaussian (normal) density at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
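    # Quick sanity check (uses the `gaussian` defined above): the standard
    # normal density peaks at 1 / sqrt(2 * pi) at its mean.
    print(gaussian(0.0))  # -> 0.3989422804014327
    print(gaussian(2.0, mu=2.0, sigma=0.5))  # -> 0.7978845608028654, the peak of N(2, 0.25)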
| 72 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
# Decorators marking the hardware/library requirements of a test; each one
# skips the decorated test when its requirement is not met.
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
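# Typical decorator usage (a sketch; `slow` and `require_cuda` are defined above):
#
#   class BigModelTester(unittest.TestCase):
#       @slow
#       @require_cuda
#       def test_big_model(self):
#           ...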
class TempDirTestCase(unittest.TestCase):
    """Keeps one temporary directory per TestCase class, wiped at the start of every test."""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        # Destroy all the contents of `self.tmpdir`, but not the directory itself.
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks=None):
        """Register one or more mocks to be started now and stopped after the test."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that `tensor` holds the same value on every distributed process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Run `command` with subprocess, optionally returning its decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 90 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>",
                 eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def _tokenize(self, text, **kwargs):
        return text.split()
    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)
    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}
    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)
    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
| 90 | 1 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover using a maximal matching heuristic."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples for every edge in the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 716 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
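    # Example (uses `find_max` above): maximum over the whole list.
    print(find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7))  # -> 9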
| 170 | 0 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a key function so that underscores are ignored when sorting."""
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants first, classes second, functions last."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the objects listed inside a single import statement."""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
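# Typical invocations (hypothetical path; run from the repository root):
#   python utils/custom_init_isort.py --check_only   # report badly sorted inits
#   python utils/custom_init_isort.py                # rewrite them in place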
| 399 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    # Total surface area: the curved half-sphere (2*pi*r^2) plus the flat disc (pi*r^2).
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), s = semi-perimeter.
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f'Rectangle: {area_rectangle(10, 20) = }')
print(f'Square: {area_square(10) = }')
print(f'Triangle: {area_triangle(10, 10) = }')
print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(f'Parallelogram: {area_parallelogram(10, 20) = }')
print(f'Rhombus: {area_rhombus(10, 20) = }')
print(f'Trapezium: {area_trapezium(10, 20, 30) = }')
print(f'Circle: {area_circle(20) = }')
print(f'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(f'Cube: {surface_area_cube(20) = }')
print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(f'Sphere: {surface_area_sphere(20) = }')
print(f'Hemisphere: {surface_area_hemisphere(20) = }')
print(f'Cone: {surface_area_cone(10, 20) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(f'Cylinder: {surface_area_cylinder(10, 20) = }')
print(f'Torus: {surface_area_torus(20, 10) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(f'Square: {area_reg_polygon(4, 10) = }')
    print(f'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 399 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether a system of coplanar forces is in rotational equilibrium."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
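# Example (uses the helpers above): polar_force(10, 45) resolves a 10 N force
# at 45 degrees into roughly [7.0710678, 7.0710678]. Forces all applied at the
# origin produce zero moment, so in_static_equilibrium trivially holds there.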
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 708 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle."""
    pass
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None
    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
    @property
    def has_loop(self) -> bool:
        """True if following next_node pointers ever revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
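# The `visited` list scan in __iter__ costs O(n^2) time and O(n) memory over a
# full traversal. A constant-memory alternative (a sketch, not part of the
# original class) is Floyd's tortoise-and-hare cycle detection:
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:  # the fast pointer lapped the slow one: a cycle exists
            return True
    return False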
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False
    root_node = Node(1)
    print(root_node.has_loop)  # False
| 304 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """Constructs a RemBERT tokenizer based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]",
                 eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", **kwargs):
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
                         bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
                         pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor cannot be pickled; it is reloaded in __setstate__.
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
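# Sketch of typical use (assumes network access to the checkpoint named in the
# maps above; the exact token ids depend on the SentencePiece model):
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   input_ids = tokenizer("Hello world")["input_ids"]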
| 511 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when the caller did not provide one.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
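# These tests are normally collected by pytest, e.g. (hypothetical path inside
# the diffusers repository):
#   pytest tests/schedulers/test_scheduler_unipc.py -k "full_loop or switch"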
| 511 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
def snake_case__ ( self ) -> Optional[int]:
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(__UpperCAmelCase )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCAmelCase ,inference=__UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCAmelCase ,)
A__ = PyTorchBenchmark(__UpperCAmelCase ,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self ) -> Optional[Any]:
A__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCAmelCase ,inference=__UpperCAmelCase ,save_to_csv=__UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCAmelCase ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(__UpperCAmelCase ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCAmelCase ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(__UpperCAmelCase ,'train_time.csv' ) ,env_info_csv_file=os.path.join(__UpperCAmelCase ,'env.csv' ) ,multi_process=__UpperCAmelCase ,)
A__ = PyTorchBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'env.csv' ) ).exists() )
def snake_case__ ( self ) -> str:
A__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCAmelCase ):
self.assertTrue(hasattr(__UpperCAmelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCAmelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCAmelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCAmelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCAmelCase ,inference=__UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCAmelCase ,'log.txt' ) ,log_print=__UpperCAmelCase ,trace_memory_line_by_line=__UpperCAmelCase ,multi_process=__UpperCAmelCase ,)
A__ = PyTorchBenchmark(__UpperCAmelCase )
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase ,'log.txt' ) ).exists() )
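
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the test suite):
# running a tiny inference benchmark directly, mirroring the arguments used in
# the tests above. The model id and sizes are the same tiny values used there.
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"],
#         training=False,
#         inference=True,
#         sequence_lengths=[8],
#         batch_sizes=[1],
#         multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result)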
| 705 | """simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give more state to XLNet and Transformer-XL on short prompts.
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
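
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of this module): the class above
# is normally instantiated through `transformers.pipeline`. The model name and
# generation kwargs below are assumptions chosen for a quick demo.
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
#     out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
#     print(out[0]["generated_text"])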
| 536 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
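
# Example (illustrative): get_pairs(("l", "o", "w", "e", "r</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}.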
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
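
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): loading the released vocabulary from the
# Hub and running BPE on a word-segmented Vietnamese sentence. Requires network
# access; the exact token ids depend on the published vocab/merges files.
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     tokens = tokenizer.tokenize("Tôi là sinh_viên")
#     ids = tokenizer.convert_tokens_to_ids(tokens)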
| 473 | """simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
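
# Example (illustrative): for logits [[0.1, 2.0], [1.5, 0.2]] and references
# [1, 0], argmax yields predictions [1, 0] and the accuracy metric is 1.0.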
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
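
# Hedged usage sketch (illustrative): a typical invocation of this training
# script; the filename is hypothetical, the flags mirror get_args() above.
#
#     python train_complexity_predictor.py \
#         --model_ckpt microsoft/unixcoder-base-nine \
#         --num_epochs 5 --batch_size 6 --output_dir ./results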
| 473 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
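
# Minimal sketch (illustrative): unwrapping a DataParallel model returns the
# original module object.
#
#     net = torch.nn.Linear(2, 2)
#     wrapped = torch.nn.DataParallel(net)
#     assert extract_model_from_parallel(wrapped) is net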
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
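
# Example (illustrative, assuming MASTER_PORT is not already set):
#
#     with patch_environment(master_port="29501"):
#         assert os.environ["MASTER_PORT"] == "29501"
#     assert "MASTER_PORT" not in os.environ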
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 171 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Compute flattening and reduced latitudes (WGS84 ellipsoid constants above).
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
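
# Worked example (illustrative): San Francisco (37.774856, -122.424227) to
# Yosemite (37.864742, -119.537521) comes out on the order of 254 km:
#
#     print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521):0,.0f} meters")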
| 171 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
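
# Example (illustrative): RegressionModel computes y = a * x + b elementwise.
#
#     model = RegressionModel(a=2.0, b=3.0)
#     print(model(torch.tensor([1.0, 2.0])))  # tensor([5., 7.]) plus a one-off dtype printout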
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 44 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
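
# Note (illustrative): perplexity is exp(mean token cross-entropy); e.g. a mean
# loss of 2.0 nats corresponds to a perplexity of e**2 ≈ 7.39.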
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 44 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep,
        proj_embedding,
        encoder_hidden_states=None,
        attention_mask=None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
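
    # Note (illustrative): the returned `predicted_image_embedding` has shape
    # (batch_size, clip_embed_dim); `post_process_latents` below simply undoes
    # the CLIP mean/std normalisation, i.e. latents * clip_std + clip_mean.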
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 493 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 493 | 1 |
'''simple docstring'''
def is_palindrome_number(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards in base 10."""
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
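
# Worked example (illustrative): 121 -> reversed 121 -> True; 123 -> reversed
# 321 -> False; negative inputs return False immediately.
#
#     assert is_palindrome_number(121) is True
#     assert is_palindrome_number(123) is False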
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case: Dict =weights[f'''layers_{lyr_num}''']
snake_case: str =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Any =ly_weight['attention']
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
snake_case: Dict =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case: List[Any] =weights[f'''layers_{lyr_num}''']
snake_case: Tuple =ly_weight['attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
snake_case: Any =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
snake_case: List[str] =weights[f'''layers_{lyr_num}''']
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
snake_case: str =ly_weight['self_attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =ly_weight['MultiHeadDotProductAttention_0']
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
| 350 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
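    # The two checks above exercise the standard Flax incremental-decoding recipe:
    #   past = model.init_cache(batch_size, max_decoder_length, encoder_outputs)  # pre-allocate the kv cache
    #   out = model.decode(tokens[:, :-1], encoder_outputs, past_key_values=past, ...)  # prime the cache
    #   out = model.decode(tokens[:, -1:], encoder_outputs, past_key_values=out.past_key_values, ...)  # one step
    # and then assert that cached decoding matches a full forward pass to within 1e-3.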
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
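# A minimal sketch of how the helper above is typically wired together (names assumed, not
# part of the original test file):
#   config, input_ids, decoder_input_ids = ...  # e.g. produced by FlaxPegasusModelTester
#   inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
#   model = FlaxPegasusModel(config)
#   outputs = model(**inputs_dict)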
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 721 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
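
    # A few hand-checked examples (not part of the original doctests):
    print(logical_left_shift(1, 2))  # 0b100  -> 1 << 2 == 4
    print(logical_right_shift(8, 2))  # 0b10   -> 8 >> 2 == 2
    print(arithmetic_right_shift(-4, 1))  # 0b1110 -> -4 >> 1 == -2 in 4-bit two's complement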
| 185 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1]) | 397
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
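# For example (tensors assumed), rename_key(state_dict, "backbone.patch_embed.norm.weight",
# "model.pixel_level_module.encoder.model.embeddings.norm.weight") pops the tensor stored
# under the old Detectron2-style key and re-inserts it under the Hugging Face key.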
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
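# Note on the slicing above: the fused qkv projection has shape (3 * dim, dim), so rows
# [:dim], [dim : 2 * dim] and [-dim:] recover the separate query, key and value projections
# that the Hugging Face Swin implementation expects.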
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text) | 476 | import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
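    # Note: jax.jit traces `eval` once and re-uses the compiled XLA program on later calls;
    # block_until_ready() forces JAX's asynchronous dispatch to finish, so the test fails here
    # (and not at some later point) if compilation or execution breaks.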
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing_error(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") | 476 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
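# `get_duration` comes from the local `utils` module; it is assumed to be a timing decorator
# roughly equivalent to the sketch below (names and implementation are an assumption, not the
# actual utils code):
#
#   import timeit
#
#   def get_duration(func):
#       def wrapper(*args, **kwargs):
#           start = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - start
#       return wrapper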
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 131 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10_000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 506 | 0 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop an operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
        # RULE 3: "(" and spaces are ignored

    # RULE 5: the final value on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 80 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
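

# A minimal usage sketch (grid values assumed; the count was checked by hand — the two
# 1-regions below are not 8-connected to each other):
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, grid).count_islands())  # 2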
| 80 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
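    # Why 3601 tokens above: facebook/dino-vits8 uses 8x8 patches, so a 480x480 input yields
    # (480 / 8) ** 2 = 3600 patches plus the [CLS] token; interpolate_pos_encoding=True
    # resizes the 224px-trained position embeddings (bicubically) to cover that longer sequence.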
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 375 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Union[str, Any]:
__lowercase = str(snake_case )
if issubclass(snake_case , snake_case ):
__lowercase = filename
elif issubclass(snake_case , snake_case ):
__lowercase = [filename]
elif issubclass(snake_case , snake_case ):
__lowercase = {'train': filename}
__lowercase = 'dummy'
__lowercase = xz_file.parent
__lowercase = 'extracted'
__lowercase = DownloadConfig(
cache_dir=snake_case , use_etag=snake_case , )
__lowercase = DownloadManager(dataset_name=snake_case , download_config=snake_case )
__lowercase = dl_manager.extract(snake_case )
__lowercase = paths
for extracted_paths in [extracted_paths]:
if isinstance(snake_case , snake_case ):
__lowercase = [extracted_paths]
__lowercase = [paths]
elif isinstance(snake_case , snake_case ):
assert "train" in extracted_paths.keys()
__lowercase = extracted_paths.values()
__lowercase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(snake_case , snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowercase = Path(snake_case )
__lowercase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(snake_case , etag=snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowercase = extracted_path.read_text()
__lowercase = text_file.read_text()
assert extracted_file_content == expected_file_content
def SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> int:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(snake_case , start=1 ):
__lowercase = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> int:
__lowercase = request.getfixturevalue(snake_case )
__lowercase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(snake_case ) , start=1 ):
_test_jsonl(snake_case , snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Any:
__lowercase = request.getfixturevalue(snake_case )
__lowercase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(snake_case ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(snake_case ) , start=1 ):
_test_jsonl(snake_case , snake_case )
assert num_tar == 1
assert num_jsonl == 2
def SCREAMING_SNAKE_CASE ( snake_case ) -> Dict:
__lowercase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(snake_case ) , start=1 ):
assert os.path.basename(snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
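# To run these tests locally (illustrative; the file path is an assumption):
#   pytest tests/test_download_manager.py -q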
| 375 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
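# Illustrative usage (added for clarity; not part of the original module):
#   config = LevitConfig()
#   config.hidden_sizes  -> [128, 256, 384]
#   config.down_ops      -> the two "Subsample" stages derived from key_dim/hidden_sizes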
| 511 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=20 , UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
a_ = []
for i in range(len(UpperCAmelCase ) ):
try:
a_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
a_ = list(filter(lambda UpperCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCAmelCase ) )
a_ = list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) )
if max_length is not None and len(UpperCAmelCase ) > max_length:
a_ = toks[:max_length]
if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0:
while len(UpperCAmelCase ) < min_length:
a_ = toks + toks
# toks_str = [t[1] for t in toks]
a_ = [t[0] for t in toks]
# Ensure consistency
a_ = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
if " " not in output_txt and len(UpperCAmelCase ) > 1:
a_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase )
)
if with_prefix_space:
a_ = """ """ + output_txt
a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
return output_txt, output_ids
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = """Unicode €."""
a_ = tokenizer(UpperCAmelCase )
a_ = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded["""input_ids"""] , UpperCAmelCase )
# decoding
a_ = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , """[CLS]Unicode €.[SEP]""" )
a_ = tokenizer("""e è é ê ë""" )
a_ = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded["""input_ids"""] , UpperCAmelCase )
# decoding
a_ = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
a_ = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
a_ = list(batch.input_ids.numpy()[0] )
else:
a_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCAmelCase )
self.assertIn("""attention_mask""" , UpperCAmelCase )
self.assertNotIn("""decoder_input_ids""" , UpperCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = [
"""Summary of the text.""",
"""Another summary.""",
]
a_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="""max_length""" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = """ He is very happy, UNwant\u00E9d,running"""
a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
a_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
a_ = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a_ = json.load(UpperCAmelCase )
a_ = [f'''<extra_id_{i}>''' for i in range(1_25 )]
a_ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
a_ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a_ = tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a_ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCAmelCase )]
a_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , """�""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
a_ = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
a_ = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
| 511 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 458 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        # Newton step: x <- x - f(x) / f'(x); eval binds the local variable x
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 180 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule(nn.Module):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int = 0 , __lowerCamelCase : Tuple = False , __lowerCamelCase : Optional[int] = 1 , ) -> None:
super().__init__()
a = nn.Conv2d(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE , )
a = nn.BatchNorm2d(_SCREAMING_SNAKE_CASE )
a = nn.ReLU()
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> torch.Tensor:
a = self.conv(_SCREAMING_SNAKE_CASE )
a = self.batch_norm(_SCREAMING_SNAKE_CASE )
a = self.activation(_SCREAMING_SNAKE_CASE )
return output
class UperNetPyramidPoolingBlock(nn.Module):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] ) -> None:
super().__init__()
a = [
nn.AdaptiveAvgPool2d(_SCREAMING_SNAKE_CASE ),
UperNetConvModule(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] ) -> torch.Tensor:
a = input
for layer in self.layers:
a = layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ) -> None:
super().__init__()
a = pool_scales
a = align_corners
a = in_channels
a = channels
a = []
for i, pool_scale in enumerate(_SCREAMING_SNAKE_CASE ):
a = UperNetPyramidPoolingBlock(pool_scale=_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , channels=_SCREAMING_SNAKE_CASE )
self.blocks.append(_SCREAMING_SNAKE_CASE )
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[torch.Tensor]:
a = []
for ppm in self.blocks:
a = ppm(_SCREAMING_SNAKE_CASE )
a = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(_SCREAMING_SNAKE_CASE )
return ppm_outs
class UperNetHead(nn.Module):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Dict:
super().__init__()
a = config
a = config.pool_scales # e.g. (1, 2, 3, 6)
a = in_channels
a = config.hidden_size
a = False
a = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a = nn.ModuleList()
a = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a = UperNetConvModule(_SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
a = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_SCREAMING_SNAKE_CASE )
self.fpn_convs.append(_SCREAMING_SNAKE_CASE )
a = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __UpperCAmelCase ( self : str ) -> List[str]:
self.apply(self._init_weights )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict ) -> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ) -> List[str]:
a = inputs[-1]
a = [x]
psp_outs.extend(self.psp_modules(_SCREAMING_SNAKE_CASE ) )
a = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
a = self.bottleneck(_SCREAMING_SNAKE_CASE )
return output
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : str ) -> torch.Tensor:
# build laterals
a = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_SCREAMING_SNAKE_CASE ) )
# build top-down path
a = len(_SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a = laterals[i - 1].shape[2:]
a = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_SCREAMING_SNAKE_CASE , mode="bilinear" , align_corners=self.align_corners )
# build outputs
a = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
a = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
a = self.fpn_bottleneck(_SCREAMING_SNAKE_CASE )
a = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class UperNetFCNHead(nn.Module):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : str = 2 , __lowerCamelCase : Dict = 3 , __lowerCamelCase : Union[str, Any] = 1 ) -> None:
super().__init__()
a = config
a = config.auxiliary_in_channels
a = config.auxiliary_channels
a = config.auxiliary_num_convs
a = config.auxiliary_concat_input
a = in_index
a = (kernel_size // 2) * dilation
a = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
a = nn.Identity()
else:
a = nn.Sequential(*_SCREAMING_SNAKE_CASE )
if self.concat_input:
a = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
a = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def __UpperCAmelCase ( self : Tuple ) -> str:
self.apply(self._init_weights )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> torch.Tensor:
# just take the relevant feature maps
a = encoder_hidden_states[self.in_index]
a = self.convs(_SCREAMING_SNAKE_CASE )
if self.concat_input:
a = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str=False ) -> Optional[int]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a = value
UPERNET_START_DOCSTRING = R'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : List[Any] ) -> Dict:
super().__init__(_SCREAMING_SNAKE_CASE )
a = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
a = UperNetHead(_SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
a = UperNetFCNHead(_SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str] = None , __lowerCamelCase : str = None , __lowerCamelCase : Union[str, Any] = None , __lowerCamelCase : Any = None , __lowerCamelCase : Tuple = None , ) -> Union[tuple, SemanticSegmenterOutput]:
a = return_dict if return_dict is not None else self.config.use_return_dict
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = output_attentions if output_attentions is not None else self.config.output_attentions
a = self.backbone.forward_with_filtered_kwargs(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
a = outputs.feature_maps
a = self.decode_head(_SCREAMING_SNAKE_CASE )
a = nn.functional.interpolate(_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE )
a = None
if self.auxiliary_head is not None:
a = self.auxiliary_head(_SCREAMING_SNAKE_CASE )
a = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE )
a = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
a = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
a = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
a = (logits,) + outputs[1:]
else:
a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
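# Illustrative usage sketch (added; class names as published in the transformers docs,
# since this file's identifiers were mangled):
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))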
| 709 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """simple docstring"""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
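    # Illustrative run (an addition, not in the original file): exercise the safety
    # algorithm on the module-level sample tables defined above.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)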
| 662 | 0 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 50 | 1 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """simple docstring"""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
__A : List[Any] = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 398 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : Dict = "http://www.mocksite.com/file1.txt"
__A : List[str] = "\"text\": [\"foo\", \"foo\"]"
__A : int = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __snake_case :
"""simple docstring"""
lowercase = 2_00
lowercase = {'Content-Length': '100'}
lowercase = {}
def __lowercase ( self : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> str:
return [bytes(lowerCamelCase , """utf-8""" )]
def UpperCamelCase_ ( *A__ : List[str] , **A__ : Union[str, Any] ):
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : List[Any] , A__ : List[Any] , A__ : str ):
'''simple docstring'''
import requests
monkeypatch.setattr(A__ , """request""" , A__ )
lowerCAmelCase_ : Tuple = URL
if issubclass(A__ , A__ ):
lowerCAmelCase_ : Optional[Any] = url
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Dict = [url]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Tuple = {"""train""": url}
lowerCAmelCase_ : List[Any] = """dummy"""
lowerCAmelCase_ : str = """downloads"""
lowerCAmelCase_ : Dict = tmp_path
lowerCAmelCase_ : Any = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
lowerCAmelCase_ : List[Any] = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : int = dl_manager.download(A__ )
lowerCAmelCase_ : Any = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : str = [downloaded_paths]
lowerCAmelCase_ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase_ : Union[str, Any] = downloaded_paths.values()
lowerCAmelCase_ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase_ : Tuple = Path(A__ )
lowerCAmelCase_ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase_ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase_ : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase_ : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] , A__ : List[str] ):
'''simple docstring'''
lowerCAmelCase_ : int = str(A__ )
if issubclass(A__ , A__ ):
lowerCAmelCase_ : int = filename
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : List[str] = [filename]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Union[str, Any] = {"""train""": filename}
lowerCAmelCase_ : Optional[int] = """dummy"""
lowerCAmelCase_ : str = xz_file.parent
lowerCAmelCase_ : List[str] = """extracted"""
lowerCAmelCase_ : Union[str, Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
lowerCAmelCase_ : str = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : Union[str, Any] = dl_manager.extract(A__ )
lowerCAmelCase_ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : List[str] = [extracted_paths]
lowerCAmelCase_ : Union[str, Any] = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
lowerCAmelCase_ : Union[str, Any] = extracted_paths.values()
lowerCAmelCase_ : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase_ : int = Path(A__ )
lowerCAmelCase_ : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase_ : Any = extracted_path.read_text()
lowerCAmelCase_ : Optional[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase_ ( A__ : Dict , A__ : Any ):
'''simple docstring'''
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(A__ , start=1 ):
lowerCAmelCase_ : int = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = request.getfixturevalue(A__ )
lowerCAmelCase_ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCamelCase_ ( A__ : str , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = request.getfixturevalue(A__ )
lowerCAmelCase_ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase_ ( A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A__ ) , start=1 ):
assert os.path.basename(A__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 398 | 1 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
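    # Quick illustrative check (added): 15 triggers both rules and ends with "FizzBuzz".
    print(fizz_buzz(1, 15))  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz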
| 422 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f'{solution() = }')
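# Illustrative sanity check (added): prime_sieve(30) yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].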
| 422 | 1 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """simple docstring"""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        """simple docstring"""
        return y**2 * y**4

    print(differentiate(f, 9, 2))
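    # Hand check (illustrative addition): f(y) = y**6, so f''(y) = 30 * y**4 and the
    # call above prints 30 * 9**4 = 196830.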
| 719 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """simple docstring"""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """simple docstring"""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
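if __name__ == "__main__":
    # Illustrative checks (added, not in the original module): with [1, 2, 3]
    # min-max rescaling yields [0.0, 0.5, 1.0] and z-scores (sample stdev = 1.0)
    # yield [-1.0, 0.0, 1.0].
    print(normalization([1, 2, 3]))
    print(standardization([1, 2, 3]))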
| 163 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
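    # Example invocation (illustrative; the script name is an assumption):
    #   python extract.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform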
| 382 | import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
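# Hedged usage sketch mirroring the docstring example above; assumes this
# metric is what `datasets.load_metric("rouge")` resolves to.
rouge = datasets.load_metric("rouge")
results = rouge.compute(predictions=["hello there"], references=["hello there"])
print(results["rouge1"].mid.fmeasure)  # 1.0 for an exact match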
| 382 | 1 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read-only view over a single compressed file, exposed as a filesystem."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : Union[str, Any] , snake_case : str = "" , snake_case : Optional[str] = None , snake_case : Optional[dict] = None , **snake_case : Any ) -> Dict:
super().__init__(self , **snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase : Dict = fsspec.open(
snake_case , mode='''rb''' , protocol=snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase : Dict = os.path.basename(self.file.path.split('''::''' )[0] )
__UpperCAmelCase : Tuple = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase : str = None
@classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")
    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat(self, path: str):
        return self.file.open().read()
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : str , snake_case : str = "rb" , snake_case : Any=None , snake_case : Optional[int]=True , snake_case : int=None , **snake_case : str , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self._strip_protocol(snake_case )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """bz2 compression filesystem."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
    """gzip compression filesystem."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
    """lz4 compression filesystem."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
    """xz (LZMA) compression filesystem."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    """zstd compression filesystem."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self : Union[str, Any] , snake_case : str , snake_case : str = "rb" , snake_case : Optional[str] = None , snake_case : Optional[dict] = None , snake_case : int = DEFAULT_BLOCK_SIZE , **snake_case : str , ) -> int:
super().__init__(
fo=snake_case , mode=snake_case , target_protocol=snake_case , target_options=snake_case , block_size=snake_case , **snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
"""simple docstring"""
            def __init__(self, file_):
                self._file = file_
def __enter__( self : List[Any] ) -> List[Any]:
self._file.__enter__()
return self
            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)
def __iter__( self : List[Any] ) -> Optional[Any]:
return iter(self._file )
            def __next__(self):
return next(self._file )
            def __getattr__(self, attr):
                return getattr(self._file, attr)
        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))
        self.file.__enter__ = fixed_enter
| 266 |
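# Hedged usage sketch: reading through one of the compression filesystems above
# with fsspec's chained-URL syntax (the "gzip://file.txt::..." form from the
# protocol comment near the top of this file). The local path is hypothetical.
import fsspec

with fsspec.open("gzip://data.txt::file://./data.txt.gz", "rt") as f:
    print(f.read())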
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCAmelCase :Optional[Any] = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase :List[Any] = "RegNetConfig"
# Base docstring
__UpperCAmelCase :List[Any] = "facebook/regnet-y-040"
__UpperCAmelCase :Union[str, Any] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__UpperCAmelCase :int = "facebook/regnet-y-040"
__UpperCAmelCase :Optional[Any] = "tabby, tabby cat"
__UpperCAmelCase :Dict = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity
    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : str , snake_case : RegNetConfig , **snake_case : Tuple ) -> int:
super().__init__(**snake_case )
__UpperCAmelCase : List[str] = config.num_channels
__UpperCAmelCase : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Dict ) -> int:
__UpperCAmelCase : int = shape_list(snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCAmelCase : Dict = tf.transpose(snake_case , perm=(0, 2, 3, 1) )
__UpperCAmelCase : List[str] = self.embedder(snake_case )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Any , snake_case : int , snake_case : int = 2 , **snake_case : Tuple ) -> str:
super().__init__(**snake_case )
__UpperCAmelCase : str = tf.keras.layers.ConvaD(
filters=snake_case , kernel_size=1 , strides=snake_case , use_bias=snake_case , name='''convolution''' )
__UpperCAmelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def lowerCamelCase__ ( self : str , snake_case : tf.Tensor , snake_case : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(snake_case ) , training=snake_case )
class TFRegNetSELayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Tuple , snake_case : int , snake_case : int , **snake_case : Tuple ) -> List[Any]:
super().__init__(**snake_case )
__UpperCAmelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case , name='''pooler''' )
__UpperCAmelCase : Dict = [
tf.keras.layers.ConvaD(filters=snake_case , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=snake_case , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def lowerCamelCase__ ( self : Optional[int] , snake_case : Tuple ) -> Union[str, Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCAmelCase : str = self.pooler(snake_case )
for layer_module in self.attention:
__UpperCAmelCase : int = layer_module(snake_case )
__UpperCAmelCase : List[Any] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Any , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 1 , **snake_case : int ) -> int:
super().__init__(**snake_case )
__UpperCAmelCase : Any = in_channels != out_channels or stride != 1
__UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Optional[int] = (
TFRegNetShortCut(snake_case , stride=snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
snake_case , stride=snake_case , groups=snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=snake_case , name='''layer.2''' ),
]
__UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : Optional[Any] ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : Any = layer_module(snake_case )
__UpperCAmelCase : Tuple = self.shortcut(snake_case )
hidden_state += residual
__UpperCAmelCase : Optional[int] = self.activation(snake_case )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : List[str] , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 1 , **snake_case : List[str] ) -> Optional[int]:
super().__init__(**snake_case )
__UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
__UpperCAmelCase : Optional[Any] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Any = (
TFRegNetShortCut(snake_case , stride=snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__UpperCAmelCase : List[str] = [
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
snake_case , stride=snake_case , groups=snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=snake_case , name='''layer.3''' ),
]
__UpperCAmelCase : Dict = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : Any = layer_module(snake_case )
__UpperCAmelCase : int = self.shortcut(snake_case )
hidden_state += residual
__UpperCAmelCase : Optional[int] = self.activation(snake_case )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 2 , snake_case : int = 2 , **snake_case : str ) -> Optional[Any]:
super().__init__(**snake_case )
__UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__UpperCAmelCase : str = [
# downsampling is done in the first layer with stride of 2
layer(snake_case , snake_case , snake_case , stride=snake_case , name='''layers.0''' ),
*[layer(snake_case , snake_case , snake_case , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCamelCase__ ( self : List[str] , snake_case : Any ) -> List[Any]:
for layer_module in self.layers:
__UpperCAmelCase : Optional[Any] = layer_module(snake_case )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Any , snake_case : RegNetConfig , **snake_case : int ) -> str:
super().__init__(**snake_case )
__UpperCAmelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__UpperCAmelCase : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case , snake_case , snake_case , depth=snake_case , name=f'stages.{i+1}' ) )
def lowerCamelCase__ ( self : int , snake_case : tf.Tensor , snake_case : bool = False , snake_case : bool = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCAmelCase : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase : Any = hidden_states + (hidden_state,)
__UpperCAmelCase : List[Any] = stage_module(snake_case )
if output_hidden_states:
__UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case , hidden_states=snake_case )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
"""simple docstring"""
    config_class = RegNetConfig
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
@unpack_inputs
def lowerCamelCase__ ( self : Dict , snake_case : tf.Tensor , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[Any] = self.embedder(snake_case , training=snake_case )
__UpperCAmelCase : Optional[int] = self.encoder(
snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case )
__UpperCAmelCase : List[str] = encoder_outputs[0]
__UpperCAmelCase : str = self.pooler(snake_case )
        # Change to NCHW output format to have uniformity in the modules
__UpperCAmelCase : Optional[Any] = tf.transpose(snake_case , perm=(0, 3, 1, 2) )
__UpperCAmelCase : str = tf.transpose(snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCAmelCase : Dict = tuple([tf.transpose(snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def lowerCamelCase__ ( self : int ) -> List[str]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__UpperCAmelCase :Optional[int] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase :List[Any] = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _a , )
class TFRegNetModel(TFRegNetPreTrainedModel):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase__ ( self : Tuple , snake_case : tf.Tensor , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : str=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCAmelCase : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Dict = self.regnet(
pixel_values=snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _a , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase__ ( self : Tuple , snake_case : tf.Tensor = None , snake_case : tf.Tensor = None , snake_case : bool = None , snake_case : bool = None , snake_case : Tuple=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[int] = self.regnet(
snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case )
__UpperCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase : Tuple = self.classifier[0](snake_case )
__UpperCAmelCase : Tuple = self.classifier[1](snake_case )
__UpperCAmelCase : Any = None if labels is None else self.hf_compute_loss(labels=snake_case , logits=snake_case )
if not return_dict:
__UpperCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) | 266 | 1 |
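# Hedged usage sketch through the public transformers API that these layers
# back; the checkpoint and the NCHW (1, 3, 224, 224) input shape come from the
# docstring constants and serving signature above.
#
#   import tensorflow as tf
#   from transformers import TFRegNetForImageClassification
#
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(tf.random.uniform((1, 3, 224, 224))).logits  # (1, num_labels)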
_a: List[str] = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_a: int = frozenset(["""prompt""", """negative_prompt"""])
_a: Tuple = frozenset([])
_a: Optional[int] = frozenset(["""image"""])
_a: Optional[Any] = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_a: List[Any] = frozenset(["""image"""])
_a: Optional[int] = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_a: int = frozenset(["""prompt""", """image""", """negative_prompt"""])
_a: int = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_a: List[Any] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_a: Union[str, Any] = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_a: int = frozenset(["""image""", """mask_image"""])
_a: Tuple = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_a: Dict = frozenset(["""example_image""", """image""", """mask_image"""])
_a: List[Any] = frozenset(["""class_labels"""])
_a: List[Any] = frozenset(["""class_labels"""])
_a: List[Any] = frozenset(["""batch_size"""])
_a: Dict = frozenset([])
_a: Dict = frozenset(["""batch_size"""])
_a: List[Any] = frozenset([])
_a: Union[str, Any] = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_a: Optional[Any] = frozenset(["""prompt""", """negative_prompt"""])
_a: Dict = frozenset(["""input_tokens"""])
_a: Dict = frozenset(["""input_tokens"""]) | 162 |
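# Hedged usage sketch: frozensets like the ones above typically serve as
# required/allowed argument tables for pipeline tests, consumed via subset
# checks. The names below are illustrative only.
required = frozenset(["prompt", "image", "mask_image"])
call_kwargs = {"prompt": "a cat", "image": None, "mask_image": None, "height": 512}
assert required <= set(call_kwargs), "missing required pipeline arguments"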
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
UpperCAmelCase_ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
UpperCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ = {"unk_token": "<unk>"}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
return input_text, output_text
    def test_full_tokenizer(self):
'''simple docstring'''
UpperCAmelCase_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ = "lower newer"
UpperCAmelCase_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokens + [tokenizer.unk_token]
UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
    def test_rust_and_python_full_tokenizers(self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase )
UpperCAmelCase_ = "lower newer"
# Testing tokenization
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Testing the unknown token
UpperCAmelCase_ = tokens + [rust_tokenizer.unk_token]
UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
    def test_pretokenized_inputs(self, *args, **kwargs):
'''simple docstring'''
pass
    def test_padding(self, lowerCAmelCase=15):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )
    def test_padding_if_pad_token_set_slow(self):
'''simple docstring'''
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input looooooooong", "This is a simple input"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
UpperCAmelCase_ = tokenizer.pad_token_id
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" )
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" )
UpperCAmelCase_ = tokenizer(*lowerCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" )
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
    def test_add_bos_token_slow(self):
'''simple docstring'''
UpperCAmelCase_ = "$$$"
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase , add_bos_token=lowerCAmelCase )
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = tokenizer(lowerCAmelCase )
UpperCAmelCase_ = tokenizer(lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCAmelCase_ = tokenizer.decode(out_s.input_ids )
UpperCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self : int ):
'''simple docstring'''
pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
'''simple docstring'''
UpperCAmelCase_ = [self.get_tokenizer(do_lower_case=lowerCAmelCase , add_bos_token=lowerCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ = "Encode this."
UpperCAmelCase_ = "This one too please."
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
encoded_sequence += tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode_plus(
lowerCAmelCase , lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , )
UpperCAmelCase_ = encoded_sequence_dict["input_ids"]
UpperCAmelCase_ = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
UpperCAmelCase_ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase )
]
UpperCAmelCase_ = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
'''simple docstring'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase )
UpperCAmelCase_ = "A photo of a cat"
UpperCAmelCase_ = tokenizer.encode(
lowerCAmelCase , )
self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("./test_opt" )
UpperCAmelCase_ = tokenizer.encode(
lowerCAmelCase , )
self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
    def test_fast_slow_equivalence(self):
'''simple docstring'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=lowerCAmelCase )
UpperCAmelCase_ = "A photo of a cat"
UpperCAmelCase_ = tokenizer.encode(
lowerCAmelCase , )
# Same as above
self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def test_users_can_modify_bos(self):
'''simple docstring'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase )
UpperCAmelCase_ = "bos"
UpperCAmelCase_ = tokenizer.get_vocab()["bos"]
UpperCAmelCase_ = "A photo of a cat"
UpperCAmelCase_ = tokenizer.encode(
lowerCAmelCase , )
# We changed the bos token
self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
UpperCAmelCase_ = tokenizer.encode(
lowerCAmelCase , )
self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] ) | 162 | 1 |
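# Hedged usage sketch with the published transformers API (an assumption
# relative to the local aliases above), mirroring the toy BPE fixture:
#
#   from transformers import GPT2TokenizerFast
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   print(tok.tokenize("lower newer"))  # BPE pieces with the leading-space marker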
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
"""simple docstring"""
super().__init__()
        self.learnable = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: TransformeraDModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ):
"""simple docstring"""
super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
"""simple docstring"""
__UpperCamelCase : Dict = len(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else 1
# get prompt text embeddings
__UpperCamelCase : Any = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__UpperCamelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCamelCase : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__UpperCamelCase : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCamelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__UpperCamelCase : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate text embeddings for each generation per prompt
__UpperCamelCase : Union[str, Any] = prompt_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__UpperCamelCase : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
__UpperCamelCase : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCamelCase , 1 , 1 )
else:
__UpperCamelCase : List[str] = [""""""] * batch_size
__UpperCamelCase : List[Any] = text_input_ids.shape[-1]
__UpperCamelCase : Optional[int] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__UpperCamelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__UpperCamelCase : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCamelCase : Union[str, Any] = negative_prompt_embeds.shape[1]
__UpperCamelCase : Any = negative_prompt_embeds.repeat(1 , __UpperCamelCase , 1 )
__UpperCamelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCamelCase : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase : Tuple = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase : Dict = len(__UpperCamelCase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}' )
__UpperCamelCase : List[str] = batch_size * num_images_per_prompt
__UpperCamelCase : Tuple = guidance_scale > 1.0
__UpperCamelCase : str = self._encode_prompt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__UpperCamelCase )}.' )
# get the initial completely masked latents unless the user supplied it
__UpperCamelCase : List[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__UpperCamelCase : Dict = self.transformer.num_vector_embeds - 1
__UpperCamelCase : Tuple = torch.full(__UpperCamelCase , __UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__UpperCamelCase : int = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase , device=self.device )
__UpperCamelCase : Dict = self.scheduler.timesteps.to(self.device )
__UpperCamelCase : Optional[int] = latents
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
__UpperCamelCase : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__UpperCamelCase : int = self.transformer(__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase ).sample
if do_classifier_free_guidance:
__UpperCamelCase , __UpperCamelCase : str = model_output.chunk(2 )
__UpperCamelCase : Union[str, Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCamelCase , dim=1 , keepdim=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = self.truncate(__UpperCamelCase , __UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
__UpperCamelCase : str = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCamelCase : Tuple = self.scheduler.step(__UpperCamelCase , timestep=__UpperCamelCase , sample=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__UpperCamelCase : Union[str, Any] = self.vqvae.config.vq_embed_dim
__UpperCamelCase : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__UpperCamelCase : Union[str, Any] = self.vqvae.quantize.get_codebook_entry(__UpperCamelCase , shape=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = self.vqvae.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase ).sample
__UpperCamelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase : List[str] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
"""simple docstring"""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
return rv
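# Hedged usage sketch (assumes this file backs diffusers' VQDiffusionPipeline;
# the checkpoint name is the commonly published one and is an assumption here):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
#   image.save("teddy_bear.png")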
| 702 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 515 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''A directed edge with a 0/1 weight, used by the 0-1 BFS below.'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''Graph as an adjacency list; all edge weights must be 0 or 1.'''

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # 0-1 BFS: weight-0 edges go to the front of the deque and weight-1
        # edges to the back, so vertices leave the deque in distance order.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
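# Hedged usage sketch for the 0-1 BFS above (names follow the reconstruction):
# the two weight-0 hops 0 -> 1 -> 2 beat the direct weight-1 edge 0 -> 2.
g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 0)
g.add_edge(0, 2, 1)
print(g.get_shortest_path(0, 2))  # 0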
| 55 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
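# Hedged usage sketch for the pipelines re-exported above (public diffusers
# API; the checkpoint names are common published ones and are assumptions):
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )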
| 208 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    '''Return the `number`-th Catalan number (1-indexed): catalan(5) == 14.'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    # iterate the recurrence C(i) = C(i-1) * (4i - 2) // (i + 1)
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
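# Hedged usage sketch: catalan(5) multiplies in 2, 6, 10, 14 and divides by
# 2, 3, 4, 5 along the way, so the sequence runs 1, 1, 2, 5, 14, ...
#
#   print(catalan(5))  # 14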
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
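# Hedged sketch of the lazy-import pattern used above (a minimal stand-in, not
# transformers' actual _LazyModule): the module in sys.modules is replaced by a
# proxy, so heavy backends only import when an attribute is first accessed.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value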
| 489 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
"""simple docstring"""
__lowerCamelCase : List[Any] = 1
__lowerCamelCase : Any = 3
__lowerCamelCase : Tuple = (32, 32)
__lowerCamelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A__ )
return image
@property
    def dummy_cond_unet(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(A__ )
@property
    def dummy_extractor(self):
"""simple docstring"""
        def extract(*args, **kwargs):
            class Out:
def __init__( self : str ):
"""simple docstring"""
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
return Out()
return extract
    def test_safe_diffusion_ddim(self):
"""simple docstring"""
__lowerCamelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : int = self.dummy_cond_unet
__lowerCamelCase : Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
__lowerCamelCase : str = self.dummy_vae
__lowerCamelCase : Optional[Any] = self.dummy_text_encoder
__lowerCamelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__lowerCamelCase : int = StableDiffusionPipeline(
unet=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , safety_checker=A__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase : Dict = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : Any = """A painting of a squirrel eating a burger"""
__lowerCamelCase : str = torch.Generator(device=A__ ).manual_seed(0 )
__lowerCamelCase : Optional[int] = sd_pipe([prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : str = torch.Generator(device=A__ ).manual_seed(0 )
__lowerCamelCase : int = sd_pipe(
[prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A__ , )[0]
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Dict = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Any = self.dummy_cond_unet
__lowerCamelCase : Tuple = PNDMScheduler(skip_prk_steps=A__ )
__lowerCamelCase : Dict = self.dummy_vae
__lowerCamelCase : Union[str, Any] = self.dummy_text_encoder
__lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__lowerCamelCase : Dict = StableDiffusionPipeline(
unet=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , safety_checker=A__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase : Optional[Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : str = """A painting of a squirrel eating a burger"""
__lowerCamelCase : Dict = torch.Generator(device=A__ ).manual_seed(0 )
__lowerCamelCase : List[Any] = sd_pipe([prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__lowerCamelCase : List[Any] = output.images
__lowerCamelCase : str = torch.Generator(device=A__ ).manual_seed(0 )
__lowerCamelCase : List[Any] = sd_pipe(
[prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A__ , )[0]
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=A__ )
assert isinstance(A__ , A__ )
assert isinstance(pipe.scheduler , A__ )
assert pipe.safety_checker is None
__lowerCamelCase : int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A__ )
__lowerCamelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(A__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCamelCase : Union[str, Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = self.dummy_cond_unet
__lowerCamelCase : List[Any] = PNDMScheduler(skip_prk_steps=A__ )
__lowerCamelCase : Any = self.dummy_vae
__lowerCamelCase : Optional[int] = self.dummy_text_encoder
__lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__lowerCamelCase : List[str] = unet.half()
__lowerCamelCase : Optional[Any] = vae.half()
__lowerCamelCase : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase : int = StableDiffusionPipeline(
unet=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , safety_checker=A__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase : List[Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : List[str] = """A painting of a squirrel eating a burger"""
__lowerCamelCase : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : str = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A__ )
__lowerCamelCase : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase : Dict = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : Dict = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__lowerCamelCase : Tuple = 4003660346
__lowerCamelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
__lowerCamelCase : Union[str, Any] = torch.manual_seed(A__ )
__lowerCamelCase : str = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase : Union[str, Any] = output.images
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration, sld_guidance_scale > 0)
__lowerCamelCase : Dict = torch.manual_seed(A__ )
__lowerCamelCase : str = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A__ )
__lowerCamelCase : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase : Any = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : str = """padme amidala taking a bath artwork, safe for work, no nudity"""
__lowerCamelCase : int = 2734971755
__lowerCamelCase : List[str] = 7
__lowerCamelCase : Dict = torch.manual_seed(A__ )
__lowerCamelCase : Optional[int] = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase : Optional[int] = output.images
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
__lowerCamelCase : Union[str, Any] = torch.manual_seed(A__ )
__lowerCamelCase : str = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase : Dict = output.images
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__lowerCamelCase : str = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
__lowerCamelCase : str = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__lowerCamelCase : List[Any] = 1044355234
__lowerCamelCase : Any = 12
__lowerCamelCase : int = torch.manual_seed(A__ )
__lowerCamelCase : Tuple = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase : str = output.images
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
__lowerCamelCase : Tuple = torch.manual_seed(A__ )
__lowerCamelCase : int = sd_pipe(
[prompt] , generator=A__ , guidance_scale=A__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : List[str] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
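# Added note (a sketch, not part of the original test file): the sld_* kwargs
# exercised above belong to Safe Latent Diffusion. sld_guidance_scale=0 turns
# the safety guidance off entirely; the "strong" preset used by these tests is
# the following (values copied from the calls above):
SLD_STRONG_CONFIG_SKETCH = {
    "sld_guidance_scale": 2000,
    "sld_warmup_steps": 7,
    "sld_threshold": 0.025,
    "sld_momentum_scale": 0.5,
    "sld_mom_beta": 0.7,
}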
| 150 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __lowercase (method ) -> Optional[Any]:
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self, *args, **kwargs ):
        if hasattr(self, """_hf_hook""" ) and hasattr(self._hf_hook, """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self, *args, **kwargs )
    return wrapper
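# Added usage sketch (not part of the original module): applying the decorator
# after class creation avoids Python's name mangling of `__lowercase` inside a
# class body. `ToyModule` is a hypothetical class used only for illustration.
import torch
class ToyModule(torch.nn.Module):
    def encode(self, x):
        # With accelerate >= 0.17.0 and an attached `_hf_hook`, the hook's
        # pre_forward runs on `self` before this body executes.
        return x * 2
ToyModule.encode = __lowercase(ToyModule.encode)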
| 150 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowercase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
__UpperCamelCase =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
__UpperCamelCase =text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}] )
__UpperCamelCase =text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] , )
__UpperCamelCase =text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
# Legacy behavior
__UpperCamelCase =text_classifier('''This is great !''' , return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
__UpperCamelCase =text_classifier('''This is great !''' , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}]] )
__UpperCamelCase =text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] , )
__UpperCamelCase =text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
] , )
@require_torch
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
import torch
__UpperCamelCase =pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
__UpperCamelCase =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@require_tf
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
__UpperCamelCase =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@slow
@require_torch
def UpperCAmelCase_ ( self : Any ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =pipeline('''text-classification''' )
__UpperCamelCase =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__UpperCamelCase =text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__UpperCamelCase =text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
@slow
@require_tf
def UpperCAmelCase_ ( self : int ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =pipeline('''text-classification''' , framework='''tf''' )
__UpperCamelCase =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__UpperCamelCase =text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__UpperCamelCase =text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =TextClassificationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__UpperCamelCase ='''HuggingFace is in'''
__UpperCamelCase =text_classifier(__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
__UpperCamelCase =['''HuggingFace is in ''', '''Paris is in France''']
__UpperCamelCase =text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}, {'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__UpperCamelCase =text_classifier(__lowerCAmelCase , top_k=__lowerCAmelCase )
__UpperCamelCase =len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] * N, [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] * N] , )
__UpperCamelCase ={'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
__UpperCamelCase =text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__UpperCamelCase =[['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(__lowerCAmelCase ):
text_classifier(__lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__UpperCamelCase =text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
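# Added summary sketch (not part of the original test file): the `top_k`
# semantics the tests above exercise, in plain pipeline terms.
#   classifier("This is great !")              -> [{"label": ..., "score": ...}]  (single best label)
#   classifier("This is great !", top_k=2)     -> two {"label", "score"} dicts
#   classifier("This is great !", top_k=None)  -> all labels (replaces the legacy
#                                                 return_all_scores=True flag)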
| 707 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
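# Added usage sketch (not part of the original file; the model id below is an
# illustrative Chinese-CLIP checkpoint):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=pil_image, return_tensors="pt")
# inputs.keys()  # input_ids / token_type_ids / attention_mask / pixel_values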
| 296 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day( year: int , month: int , day: int ) -> str:
    '''simple docstring'''
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
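    # Added spot-checks (a sketch, not in the original file): the Doomsday
    # rule should reproduce known dates.
    assert get_week_day(2000, 1, 1) == "Saturday"
    assert get_week_day(1900, 1, 1) == "Monday"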
| 457 |
import math
def proth( number: int ) -> int:
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers have the form k * 2**n + 1; the list is built block by
        # block, with the number of new entries doubling per block.
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(F"""ValueError: there is no {number}th Proth number""")
            continue
        print(F"""The {number}th Proth number: {value}""")
| 652 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ):
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    """simple docstring"""
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def show_image( image ):
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
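# Added usage sketch (not part of the original file): freeze a small module
# and confirm its parameters no longer require gradients.
if __name__ == "__main__":
    layer = torch.nn.Linear(4 , 4 )
    freeze_params(layer )
    assert not any(p.requires_grad for p in layer.parameters() )
    print(get_device() , get_timestamp() )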
| 704 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case ( a_ : int ) -> Union[str, Any]:
"""simple docstring"""
random.seed(a_ )
np.random.seed(a_ )
torch.manual_seed(a_ )
torch.cuda.manual_seed_all(a_ )
# ^^ safe to call this function even if cuda is not available
class EMAModel :
    """simple docstring"""
    def __init__( self , parameters , decay = 0.9999 , min_decay = 0.0 , update_after_step = 0 , use_ema_warmup = False , inv_gamma = 1.0 , power = 2 / 3 , model_cls = None , model_config = None , **kwargs , ):
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("""max_value""" , None ) is not None:
            deprecation_message = """The `max_value` argument is deprecated. Please use `decay` instead."""
            deprecate("""max_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
            decay = kwargs["""max_value"""]
        if kwargs.get("""min_value""" , None ) is not None:
            deprecation_message = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
            deprecate("""min_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
            min_decay = kwargs["""min_value"""]
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("""device""" , None ) is not None:
            deprecation_message = """The `device` argument is deprecated. Please use `to` instead."""
            deprecate("""device""" , """1.0.0""" , deprecation_message , standard_warn=False )
            self.to(device=kwargs["""device"""] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained( cls , path , model_cls ):
        _ , ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self , path ):
        if self.model_cls is None:
            raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
        if self.model_config is None:
            raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("""shadow_params""" , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self , optimization_step ):
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
    @torch.no_grad()
    def step( self , parameters ):
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self , parameters ):
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self , device=None , dtype=None ):
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ):
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store( self , parameters ):
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self , parameters ):
        if self.temp_stored_params is None:
            raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self , state_dict ):
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("""decay""" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("""Decay must be between 0 and 1""" )
        self.min_decay = state_dict.get("""min_decay""" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("""Invalid min_decay""" )
        self.optimization_step = state_dict.get("""optimization_step""" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("""Invalid optimization_step""" )
        self.update_after_step = state_dict.get("""update_after_step""" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("""Invalid update_after_step""" )
        self.use_ema_warmup = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("""Invalid use_ema_warmup""" )
        self.inv_gamma = state_dict.get("""inv_gamma""" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("""Invalid inv_gamma""" )
        self.power = state_dict.get("""power""" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("""Invalid power""" )
        shadow_params = state_dict.get("""shadow_params""" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("""shadow_params must be a list""" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("""shadow_params must all be Tensors""" )
| 543 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'vit_mae'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 86 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __UpperCAmelCase :
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
a__ : List[str] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
a__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
a__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ : str = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
a__ : Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
a__ : List[Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
a__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
a__ : Dict = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ : Optional[int] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
a__ : Tuple = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
a__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
a__ : Dict = self.get_dummy_components()
a__ : Any = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ : Any = self.get_dummy_inputs(a_ )
a__ : Optional[int] = inputs["prompt"]
a__ : List[Any] = inputs["generator"]
a__ : Optional[int] = inputs["num_inference_steps"]
a__ : Any = inputs["output_type"]
if "image" in inputs:
a__ : Any = inputs["image"]
else:
a__ : Dict = None
if "mask_image" in inputs:
a__ : Optional[int] = inputs["mask_image"]
else:
a__ : Any = None
if "original_image" in inputs:
a__ : List[Any] = inputs["original_image"]
else:
a__ : str = None
a__ , a__ : Optional[int] = pipe.encode_prompt(a_ )
# inputs with prompt converted to embeddings
a__ : Union[str, Any] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
a__ : Dict = image
if mask_image is not None:
a__ : Any = mask_image
if original_image is not None:
a__ : Optional[int] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
a__ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
a__ : List[str] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"`{optional_component}` did not stay set to None after loading." , )
a__ : Union[str, Any] = self.get_dummy_inputs(a_ )
a__ : str = inputs["generator"]
a__ : Dict = inputs["num_inference_steps"]
a__ : Optional[int] = inputs["output_type"]
# inputs with prompt converted to embeddings
a__ : List[Any] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
a__ : Dict = image
if mask_image is not None:
a__ : Any = mask_image
if original_image is not None:
a__ : Dict = original_image
a__ : Optional[Any] = pipe_loaded(**a_ )[0]
a__ : int = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
self.assertLess(a_ , 1E-4 )
def UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
a__ : Dict = self.get_dummy_components()
a__ : Dict = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ : List[str] = self.get_dummy_inputs(a_ )
a__ : Dict = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
a__ : str = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
a__ : Optional[int] = self.get_dummy_inputs(a_ )
a__ : Optional[int] = pipe_loaded(**a_ )[0]
a__ : List[Any] = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
        self.assertLess(a_ , 1E-4 )
| 642 | 0 |
def find_min( arr ):
    '''simple docstring'''
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
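# Added usage sketch (not part of the original file): [1, 6, 11, 5] splits
# into {1, 5, 6} and {11}, so the minimum subset-sum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5] ))  # 1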
| 53 |
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
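    # Added spot-check (a sketch, not in the original file): for [1, 2, 4, 3]
    # the best non-adjacent selection is {1, 4} or {2, 3}, summing to 5.
    print(maximum_non_adjacent_sum([1, 2, 4, 3] ))  # 5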
| 53 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : List[str] = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 543 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """prophetnet.tokenizer"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """microsoft/xprophetnet-large-wiki100-cased""": (
            """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """microsoft/xprophetnet-large-wiki100-cased""": 512,
}
def load_vocab( vocab_file ) -> dict:
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
        for i in range(10 ):
            tok = f'[unused{i}]'
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""" )
            raise
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model ) + self.fairseq_offset
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
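# Added usage sketch (not part of the original file; the checkpoint id is the
# one referenced in the pretrained maps above):
# tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
# ids = tokenizer("Hello world").input_ids  # ends with the [SEP] id (2)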
| 543 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
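# Added illustration (a sketch, not part of the original script): how MAPPING
# rewrites a fairseq-style key. "self_attn.k_proj" inside
# "encoder.layers.3.self_attn.k_proj" becomes
# "wav2vec2.encoder.layers.3.attention.k_proj", the "*" wildcard carrying the
# layer index; keys listed in TOP_LEVEL_KEYS skip the "wav2vec2." prefix.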
def read_txt_into_dict( filename ):
    '''simple docstring'''
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
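# Hedged walk-through of load_wavaveca_layer (the key below is illustrative):
# a fairseq key such as "encoder.layers.3.fc1.weight" matches the MAPPING entry
# "fc1" -> "encoder.layers.*.feed_forward.intermediate_dense"; the "*" is
# replaced by the layer index parsed from the name, the "wav2vec2." prefix is
# prepended (the target is not in TOP_LEVEL_KEYS), and the tensor is written to
# the HF model via set_recursively with weight_type="weight".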
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
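# Hedged example invocation (script name and paths are placeholders):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h \
#       --dict_path /path/to/dict.ltr.txt
# Omitting --not_finetuned and --is_seq_class converts a fine-tuned CTC model.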
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
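# Hedged usage sketch (model id and file name are placeholders; assumes the
# default apply_ocr=True image processor, which needs Tesseract installed):
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image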
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet with a + b + c = 1_000.
    """
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
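# Hedged sanity check of the approach: substituting c = 1_000 - a - b removes
# the third loop, so only a*a + b*b == c*c has to be tested. The known Euler #9
# triplet a=200, b=375, c=425 satisfies 200**2 + 375**2 == 425**2 and gives the
# product 200 * 375 * 425 == 31_875_000.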
if __name__ == "__main__":
print(f'{solution() = }')
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
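# Hedged example of the renaming (the key is illustrative): an original SimMIM
# key "encoder.patch_embed.proj.weight" becomes
# "swin.embeddings.patch_embeddings.projection.weight" -- the patch-embed rule
# rewrites the middle and the final fallback prepends "swin." because the key
# does not contain "decoder".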
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''')
        model.push_to_hub(f'''microsoft/{model_name}''')
        image_processor.push_to_hub(f'''microsoft/{model_name}''')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
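# Hedged example invocation (the script and checkpoint names are placeholders):
#   python convert_swin_simmim.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim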
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
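# Hedged usage note (the import path is illustrative): with the lazy structure
# above, `from transformers.models.van import VanModel` defers the
# torch-dependent modeling import until the attribute is first accessed, so
# importing the package itself stays cheap.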
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
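# Hedged usage sketch (vertices and weights invented for illustration):
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)  # 0-weight edges go to the front of the deque
#   g.add_edge(1, 2, 1)
#   g.get_shortest_path(0, 2)  # -> 1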
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
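# Hedged examples (inputs invented for illustration):
#   check_anagrams("Silent", "Listen")      -> True   (case-insensitive)
#   check_anagrams("new york", "york wen")  -> True   (whitespace is ignored)
#   check_anagrams("this is", "that was")   -> False  (lengths differ)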
if __name__ == "__main__":
from doctest import testmod
testmod()
input_a = input("Enter the first string ").strip()
input_b = input("Enter the second string ").strip()

status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
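# Hedged usage sketch (values shown follow the defaults above):
#   config = DetrConfig()
#   assert config.hidden_size == config.d_model == 256  # attribute_map alias
#   onnx_config = DetrOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values", "pixel_mask"]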
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent; must be overridden to actually grab the lock."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent; must be overridden to actually release the lock."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Prefer the platform-specific hard lock; fall back to the soft lock.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Dict = BioGptModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__lowercase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]:
__lowercase : Union[str, Any] = BioGptForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Tuple:
__lowercase : Any = BioGptModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# create attention mask
__lowercase : str = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_ )
__lowercase : Dict = self.seq_length // 2
__lowercase : str = 0
# first forward pass
__lowercase ,__lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__lowercase : int = ids_tensor((1,) , UpperCamelCase_ ).item() + 1
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__lowercase : str = random_other_next_tokens
# append to next input_ids and attn_mask
__lowercase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase_ )] , dim=1 , )
# get two different outputs
__lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state''']
__lowercase : str = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state''']
# select random slice
__lowercase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__lowercase : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Dict:
__lowercase : str = BioGptModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
__lowercase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_ )
# first forward pass
__lowercase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__lowercase ,__lowercase : List[str] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__lowercase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : str = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__lowercase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state''']
__lowercase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[
'''last_hidden_state'''
]
# select random slice
__lowercase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False ) -> Union[str, Any]:
__lowercase : Any = BioGptForCausalLM(UpperCamelCase_ )
model.to(UpperCamelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__lowercase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ ) -> List[str]:
__lowercase : List[str] = BioGptModel(UpperCamelCase_ )
__lowercase : List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Any:
__lowercase : Optional[Any] = self.num_labels
__lowercase : List[str] = BioGptForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : int = BioGptModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _lowerCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : List[Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> str:
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase_ , gradient_checkpointing=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> str:
__lowercase : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(UpperCamelCase_ )
__lowercase : List[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__lowercase : int = '''left'''
# Define PAD Token = EOS Token = 50256
__lowercase : Tuple = tokenizer.eos_token
__lowercase : Optional[Any] = model.config.eos_token_id
# use different length sentences to test batching
__lowercase : Optional[int] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' , padding=UpperCamelCase_ )
__lowercase : Dict = inputs['''input_ids'''].to(UpperCamelCase_ )
__lowercase : str = model.generate(
input_ids=UpperCamelCase_ , attention_mask=inputs['''attention_mask'''].to(UpperCamelCase_ ) , )
__lowercase : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ )
__lowercase : Dict = model.generate(input_ids=UpperCamelCase_ )
__lowercase : int = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__lowercase : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ )
__lowercase : Any = model.generate(input_ids=UpperCamelCase_ , max_length=model.config.max_length - num_paddings )
__lowercase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : Dict = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [non_padded_sentence, padded_sentence] )
@slow
def _lowerCamelCase ( self ) -> int:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[Any] = BioGptModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase ,__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[int] = 3
__lowercase : Optional[Any] = input_dict['''input_ids''']
__lowercase : str = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowercase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase : Optional[Any] = BioGptForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self ) -> str:
__lowercase ,__lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : str = 3
__lowercase : Optional[int] = '''multi_label_classification'''
__lowercase : List[str] = input_dict['''input_ids''']
__lowercase : Dict = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowercase : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase : Optional[int] = BioGptForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
__lowercase : List[Any] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
__lowercase : int = model(UpperCamelCase_ )[0]
__lowercase : Dict = 4_23_84
__lowercase : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
__lowercase : Union[str, Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__lowercase : List[str] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(UpperCamelCase_ )
torch.manual_seed(0 )
__lowercase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(UpperCamelCase_ )
__lowercase : Dict = model.generate(
**UpperCamelCase_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=UpperCamelCase_ , )
__lowercase : str = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : Optional[int] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 76 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = tokenizer('''This is me''' , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE = model.generate(**lowercase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def snake_case__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
model.save_pretrained(lowercase )
| 201 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Any , __A : List[Any] = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = value
lowerCAmelCase__ = None # Added in order to delete a node easier
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __repr__( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , __A : Union[str, Any] = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = root
def __str__( self : int ) -> Optional[int]:
'''simple docstring'''
return str(self.root )
def lowercase__ ( self : Union[str, Any] , __A : Optional[int] , __A : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__A ): # If it is the right children
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
def lowercase__ ( self : Dict , __A : Dict ) -> List[str]:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return self.root is None
def lowercase__ ( self : str , __A : Tuple ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = Node(__A ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ = new_node
break
else:
lowerCAmelCase__ = parent_node.right
lowerCAmelCase__ = parent_node
def lowercase__ ( self : Dict , *__A : Dict ) -> Union[str, Any]:
'''simple docstring'''
for value in values:
self.__insert(__A )
def lowercase__ ( self : List[str] , __A : Dict ) -> Any:
'''simple docstring'''
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
lowerCAmelCase__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase__ = node.left if value < node.value else node.right
return node
def lowercase__ ( self : Optional[Any] , __A : List[Any] = None ) -> Optional[Any]:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase__ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ = node.right
return node
def lowercase__ ( self : Dict , __A : Union[str, Any] = None ) -> Union[str, Any]:
'''simple docstring'''
if node is None:
lowerCAmelCase__ = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ = self.root
while node.left is not None:
lowerCAmelCase__ = node.left
return node
def lowercase__ ( self : Tuple , __A : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase__ = self.search(__A ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__A , __A )
elif node.left is None: # Has only right children
self.__reassign_nodes(__A , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__A , node.left )
else:
lowerCAmelCase__ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase__ ( self : int , __A : Dict ) -> str:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase__ ( self : int , __A : int=None ) -> List[str]:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase__ ( self : int , __A : Union[str, Any] , __A : int ) -> List[str]:
'''simple docstring'''
if node:
self.inorder(__A , node.left )
arr.append(node.value )
self.inorder(__A , node.right )
def lowercase__ ( self : Union[str, Any] , __A : Any , __A : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = []
self.inorder(__A , __A ) # append all values to list using inorder traversal
return arr[k - 1]
def _lowerCAmelCase( UpperCAmelCase_ : Optional[int] ) -> Tuple:
lowerCAmelCase__ = []
if curr_node is not None:
lowerCAmelCase__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCAmelCase( ) -> Optional[int]:
lowerCAmelCase__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase__ )
# Prints all the elements of the tree in in-order traversal
print(UpperCamelCase__ )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn\'t exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn\'t exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase__ )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
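# A standalone sketch (not part of the original file; the methods above are
# obfuscated) of the in-order / k-th smallest idea: an in-order traversal of a
# binary search tree visits values in ascending order, so the k-th smallest
# element is simply the k-th value visited. Nodes here are (left, value, right)
# tuples to keep the sketch self-contained.
def _bst_insert(node, value):
    if node is None:
        return (None, value, None)
    left, v, right = node
    if value < v:
        return (_bst_insert(left, value), v, right)
    return (left, v, _bst_insert(right, value))


def _bst_inorder(node):
    if node is None:
        return []
    left, v, right = node
    return _bst_inorder(left) + [v] + _bst_inorder(right)


_demo_root = None
for _v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    _demo_root = _bst_insert(_demo_root, _v)
print(_bst_inorder(_demo_root)[3 - 1])  # 4, the 3rd smallest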
| 720 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __A : WhisperForConditionalGeneration , __A : WhisperProcessor , __A : AutoencoderKL , __A : CLIPTextModel , __A : CLIPTokenizer , __A : UNetaDConditionModel , __A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __A : StableDiffusionSafetyChecker , __A : CLIPImageProcessor , ) -> List[Any]:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__A , speech_processor=__A , vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , scheduler=__A , feature_extractor=__A , )
def lowercase__ ( self : Optional[Any] , __A : Optional[Union[str, int]] = "auto" ) -> Any:
'''simple docstring'''
if slice_size == "auto":
lowerCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
self.enable_attention_slicing(__A )
@torch.no_grad()
def __call__( self : Optional[int] , __A : List[str] , __A : Tuple=1_6000 , __A : int = 512 , __A : int = 512 , __A : int = 50 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : str , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.speech_processor.feature_extractor(
__A , return_tensors="""pt""" , sampling_rate=__A ).input_features.to(self.device )
lowerCAmelCase__ = self.speech_model.generate(__A , max_length=48_0000 )
lowerCAmelCase__ = self.speech_processor.tokenizer.batch_decode(__A , skip_special_tokens=__A , normalize=__A )[
0
]
if isinstance(__A , __A ):
lowerCAmelCase__ = 1
elif isinstance(__A , __A ):
lowerCAmelCase__ = len(__A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
# get prompt text embeddings
lowerCAmelCase__ = self.tokenizer(
__A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = text_embeddings.shape
lowerCAmelCase__ = text_embeddings.repeat(1 , __A , 1 )
lowerCAmelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase__ = 42
if negative_prompt is None:
lowerCAmelCase__ = [""""""] * batch_size
elif type(__A ) is not type(__A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !='''
f''' {type(__A )}.''' )
elif isinstance(__A , __A ):
lowerCAmelCase__ = [negative_prompt]
elif batch_size != len(__A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowerCAmelCase__ = negative_prompt
lowerCAmelCase__ = text_input_ids.shape[-1]
lowerCAmelCase__ = self.tokenizer(
__A , padding="""max_length""" , max_length=__A , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ = uncond_embeddings.shape[1]
lowerCAmelCase__ = uncond_embeddings.repeat(1 , __A , 1 )
lowerCAmelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase__ = torch.randn(__A , generator=__A , device="""cpu""" , dtype=__A ).to(
self.device )
else:
lowerCAmelCase__ = torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCAmelCase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase__ = {}
if accepts_eta:
lowerCAmelCase__ = eta
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
lowerCAmelCase__ = self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase__ ,lowerCAmelCase__ = noise_pred.chunk(2 )
lowerCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
lowerCAmelCase__ = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase__ = self.vae.decode(__A ).sample
lowerCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ = self.numpy_to_pil(__A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
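# A standalone sketch (not from the original pipeline) of the classifier-free
# guidance combine step performed inside __call__ above: the two noise
# predictions from the doubled batch are blended with the guidance weight w
# in the style of Eq. (2) of the Imagen paper.
if __name__ == "__main__":
    import torch

    guidance_scale = 7.5
    noise_pred_uncond = torch.randn(1, 4, 8, 8)
    noise_pred_text = torch.randn(1, 4, 8, 8)
    # w * (text - uncond) pushes the prediction toward the text condition
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    print(noise_pred.shape)  # torch.Size([1, 4, 8, 8])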
| 211 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowercase_ ( UpperCAmelCase__ ):
"""simple docstring"""
UpperCAmelCase_ : torch.FloatTensor
UpperCAmelCase_ : Optional[torch.FloatTensor] = None
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=0.9_99 , snake_case__="cosine" , ) -> Any:
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
lowerCAmelCase = []
for i in range(snake_case__ ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowercase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
UpperCAmelCase_ : str = 1
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 1000 , __SCREAMING_SNAKE_CASE = 0.0_0_0_1 , __SCREAMING_SNAKE_CASE = 0.0_2 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = 1.0 , **__SCREAMING_SNAKE_CASE , ) ->Optional[Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCamelCase__ ) is not None:
lowerCAmelCase = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
lowerCAmelCase = kwargs["set_alpha_to_one"]
if trained_betas is not None:
lowerCAmelCase = torch.tensor(lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase = torch.linspace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(lowerCamelCase__ )
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
lowerCAmelCase = 1.0 - self.betas
lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowerCAmelCase = 1.0
# setable values
lowerCAmelCase = None
lowerCAmelCase = torch.from_numpy(np.arange(0 , lowerCamelCase__ ).copy().astype(np.intaa ) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Union[str, Any]:
return sample
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->str:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
F" maximal {self.config.num_train_timesteps} timesteps." )
lowerCAmelCase = num_inference_steps
lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase = (np.arange(0 , lowerCamelCase__ ) * step_ratio).round().copy().astype(np.intaa )
lowerCAmelCase = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
self.timesteps += self.config.steps_offset
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ) ->List[Any]:
# 1. get previous step value (=t+1)
lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowerCAmelCase = self.alphas_cumprod[timestep]
lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
lowerCAmelCase = model_output
lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase__ , pred_original_sample=lowerCamelCase__ )
def __len__( self ) ->Tuple:
return self.config.num_train_timesteps
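# A standalone numeric sketch (readable names assumed) of the epsilon
# parameterization applied in step() above, i.e. formula (12) of the DDIM
# paper: x0 is recovered from (sample, predicted noise), then the neighbouring
# sample is re-composed from x0 plus the "direction pointing to x_t" term.
if __name__ == "__main__":
    import torch

    alpha_prod_t, alpha_prod_t_prev = 0.9, 0.8
    sample, noise_pred = torch.randn(4), torch.randn(4)
    pred_x0 = (sample - (1 - alpha_prod_t) ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
    next_sample = alpha_prod_t_prev ** 0.5 * pred_x0 + (1 - alpha_prod_t_prev) ** 0.5 * noise_pred
    print(next_sample.shape)  # torch.Size([4])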
| 312 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : Tuple = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case__ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
if attention_mask is None:
lowerCAmelCase_: Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase_: Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase_: Optional[int] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_: str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_: Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowercase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=99 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0.0_2 , ):
lowerCAmelCase_: Union[str, Any] = parent
lowerCAmelCase_: Tuple = batch_size
lowerCAmelCase_: Any = seq_length
lowerCAmelCase_: Tuple = is_training
lowerCAmelCase_: Optional[int] = use_labels
lowerCAmelCase_: List[Any] = vocab_size
lowerCAmelCase_: str = hidden_size
lowerCAmelCase_: Union[str, Any] = num_hidden_layers
lowerCAmelCase_: List[str] = num_attention_heads
lowerCAmelCase_: Dict = intermediate_size
lowerCAmelCase_: int = hidden_act
lowerCAmelCase_: Any = hidden_dropout_prob
lowerCAmelCase_: str = attention_probs_dropout_prob
lowerCAmelCase_: Union[str, Any] = max_position_embeddings
lowerCAmelCase_: Any = eos_token_id
lowerCAmelCase_: Union[str, Any] = pad_token_id
lowerCAmelCase_: Union[str, Any] = bos_token_id
lowerCAmelCase_: Optional[int] = initializer_range
def _a ( self ):
lowerCAmelCase_: List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase_: int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase_: Optional[Any] = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
lowerCAmelCase_: Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , )
lowerCAmelCase_: Optional[Any] = prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: Optional[Any] = 20
lowerCAmelCase_: int = model_class_name(lowerCamelCase__ )
lowerCAmelCase_: Any = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase_: List[str] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCAmelCase_: Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_: Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase_: Optional[int] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Any = model.decode(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: str = 20
lowerCAmelCase_: Union[str, Any] = model_class_name(lowerCamelCase__ )
lowerCAmelCase_: Tuple = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase_ , lowerCAmelCase_: List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase_: int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase_: Any = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_: Tuple = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase_: Tuple = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Union[str, Any] = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
lowerCAmelCase_: Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
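# A standalone sketch (readable names assumed) of the decoder position-id
# bookkeeping used by the cache checks above: the prefill pass feeds positions
# 0..L-2 broadcast over the batch, and the single-token step that follows uses
# position L-1 for every row.
def _decode_position_ids_demo(batch_size, seq_len):
    import jax.numpy as jnp

    prefill = jnp.broadcast_to(jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1))
    step = jnp.array(batch_size * [[seq_len - 1]], dtype="i4")
    return prefill, step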
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Optional[Any] = 99
def _a ( self ):
lowerCAmelCase_: Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase_: Optional[int] = input_ids.shape[0]
lowerCAmelCase_: str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_: List[Any] = self._get_config_and_data()
lowerCAmelCase_: Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
lowerCAmelCase_: Any = lm_model(input_ids=lowerCamelCase__ )
lowerCAmelCase_: List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCAmelCase_: Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
lowerCAmelCase_: str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowerCAmelCase_: str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase_: Any = lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
lowerCAmelCase_: List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowerCAmelCase_: Optional[Any] = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
lowerCAmelCase_: List[Any] = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase_: Any = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
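# A standalone sketch (readable names assumed) of shift_tokens_right as
# exercised by the test above: labels are shifted one position to the right,
# slot 0 becomes decoder_start_token_id, and any -100 sentinels are replaced
# by pad_token_id.
def _shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    import numpy as np

    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


# _shift_tokens_right_demo(np.array([[71, 82, 18, 2]]), 1, 2) -> [[ 2 71 82 18]]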
@require_flax
class _lowercase ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: List[Any] = True
SCREAMING_SNAKE_CASE: Optional[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE: Optional[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _a ( self ):
lowerCAmelCase_: Optional[int] = FlaxBlenderbotSmallModelTester(self )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_: Dict = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: Union[str, Any] = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest("JIT Enabled" ):
lowerCAmelCase_: Optional[int] = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase_: int = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_: Dict = model_class(lowerCamelCase__ )
lowerCAmelCase_: Optional[int] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowerCAmelCase_: Any = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest("JIT Enabled" ):
lowerCAmelCase_: Dict = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase_: Union[str, Any] = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_: Any = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase_: str = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase_: Optional[Any] = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 613 | 0 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase__ ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , **__A : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""bs4"""] )
super().__init__(**_UpperCAmelCase )
def lowercase__ ( self : Optional[int] , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowerCAmelCase__ = parent.find_all(child.name , recursive=_UpperCAmelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_UpperCAmelCase ) else next(i for i, s in enumerate(_UpperCAmelCase , 1 ) if s is child ) )
lowerCAmelCase__ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowercase__ ( self : Optional[int] , __A : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = BeautifulSoup(_UpperCAmelCase , """html.parser""" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for element in html_code.descendants:
if type(_UpperCAmelCase ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
lowerCAmelCase__ = html.unescape(_UpperCAmelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_UpperCAmelCase )
lowerCAmelCase__ = self.xpath_soup(_UpperCAmelCase )
stringaxtag_seq.append(_UpperCAmelCase )
stringaxsubs_seq.append(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowercase__ ( self : Tuple , __A : Union[str, Any] , __A : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = ''''''
for tagname, subs in zip(_UpperCAmelCase , _UpperCAmelCase ):
xpath += f'''/{tagname}'''
if subs != 0:
xpath += f'''[{subs}]'''
return xpath
def __call__( self : Optional[Any] , __A : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = False
# Check that strings has a valid type
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase__ = True
elif isinstance(_UpperCAmelCase , (list, tuple) ):
if len(_UpperCAmelCase ) == 0 or isinstance(html_strings[0] , _UpperCAmelCase ):
lowerCAmelCase__ = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
f'''but is of type {type(_UpperCAmelCase )}.''' )
lowerCAmelCase__ = bool(isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , _UpperCAmelCase )) )
if not is_batched:
lowerCAmelCase__ = [html_strings]
# Get nodes + xpaths
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for html_string in html_strings:
lowerCAmelCase__ = self.get_three_from_single(_UpperCAmelCase )
nodes.append(_UpperCAmelCase )
lowerCAmelCase__ = []
for node, tag_list, sub_list in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase__ = self.construct_xpath(_UpperCAmelCase , _UpperCAmelCase )
xpath_strings.append(_UpperCAmelCase )
xpaths.append(_UpperCAmelCase )
# return as Dict
lowerCAmelCase__ = {'''nodes''': nodes, '''xpaths''': xpaths}
lowerCAmelCase__ = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
return encoded_inputs
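# A standalone sketch (not part of the original file) of the XPath string
# construction performed by construct_xpath above: tag names plus 1-based
# sibling subscripts are joined into an XPath expression, and subscript 0
# (a tag that is unique among its siblings) gets no bracket.
def _build_xpath_demo(tags, subs):
    xpath = ""
    for tag, sub in zip(tags, subs):
        xpath += f"/{tag}"
        if sub != 0:
            xpath += f"[{sub}]"
    return xpath


# _build_xpath_demo(["html", "body", "div", "p"], [0, 0, 1, 2]) -> "/html/body/div[1]/p[2]"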
| 713 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def _lowerCAmelCase( ) -> Generator[int, None, None]:
lowerCAmelCase__ = {}
lowerCAmelCase__ = 2
while True:
lowerCAmelCase__ = factor_map.pop(UpperCAmelCase_ , UpperCAmelCase_ )
if factor:
lowerCAmelCase__ = factor + prime
while x in factor_map:
x += factor
lowerCAmelCase__ = factor
else:
lowerCAmelCase__ = prime
yield prime
prime += 1
def _lowerCAmelCase( UpperCAmelCase_ : float = 1E10 ) -> int:
lowerCAmelCase__ = sieve()
lowerCAmelCase__ = 1
while True:
lowerCAmelCase__ = next(UpperCAmelCase_ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(UpperCAmelCase_ )
n += 2
if __name__ == "__main__":
print(solution())
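# A standalone sketch (readable names; the generator above is obfuscated) of
# the incremental sieve: factor_map sends each known composite to one of its
# prime factors, and any number absent from the map when reached is prime.
def _sieve_demo(count):
    factor_map = {}
    primes = []
    n = 2
    while len(primes) < count:
        factor = factor_map.pop(n, None)
        if factor:
            # n is composite: schedule its next unmarked multiple
            x = n + factor
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # n is prime: the first composite to mark is n * n
            primes.append(n)
            factor_map[n * n] = n
        n += 1
    return primes


print(_sieve_demo(10))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]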
| 211 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class A_ ( __a ):
def __init__( self : Tuple , *snake_case__ : Any , **snake_case__ : Tuple ):
super().__init__(*snake_case__ , **snake_case__ )
lowercase = {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Any , *snake_case__ : Tuple , **snake_case__ : str ):
lowercase = super().add_tokens(snake_case__ , *snake_case__ , **snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Any , *snake_case__ : int , snake_case__ : List[str]=1 , **snake_case__ : str ):
lowercase = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
else:
lowercase = []
for i in range(snake_case__ ):
lowercase = placeholder_token + F"""_{i}"""
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
lowercase = output
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : int , snake_case__ : int=False , snake_case__ : str=1.0 ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase = self.token_map[placeholder_token]
lowercase = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
lowercase = text.replace(snake_case__ , """ """.join(snake_case__ ) )
return text
def __call__( self : Optional[Any] , snake_case__ : str , *snake_case__ : Any , snake_case__ : Dict=False , snake_case__ : Any=1.0 , **snake_case__ : Any ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : List[Any] , *snake_case__ : Tuple , snake_case__ : Dict=False , snake_case__ : Optional[Any]=1.0 , **snake_case__ : Tuple ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
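# A standalone sketch (hypothetical names) of the placeholder expansion the
# tokenizer above performs before tokenizing: one placeholder token maps to
# several numbered sub-tokens, and the prompt text is rewritten in place.
def _expand_placeholders_demo(text, token_map):
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(tokens))
    return text


# _expand_placeholders_demo("a photo of <cat-toy>", {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1"]})
# -> "a photo of <cat-toy>_0 <cat-toy>_1"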
| 428 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
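# A standalone sketch (simplified; not the real _LazyModule) of the lazy
# import idea above: attribute access triggers the import of the module that
# defines the attribute, so merely importing the package stays cheap.
class _LazyNamespaceDemo:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module

    def __getattr__(self, name):
        import importlib

        # __getattr__ only fires for attributes not found the normal way
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)


# _LazyNamespaceDemo({"sqrt": "math"}).sqrt(9.0) -> 3.0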
| 428 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
lowercase__ = hf_model.adapter
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowercase__ = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowercase__ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase__ = '''weight_g'''
elif "weight_v" in name:
lowercase__ = '''weight_v'''
elif "bias" in name:
lowercase__ = '''bias'''
elif "weight" in name:
lowercase__ = '''weight'''
else:
lowercase__ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'Unused weights: {unused_weights}' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''conv_layers.''' )[-1]
lowercase__ = name.split('''.''' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''adaptor.''' )[-1]
lowercase__ = name.split('''.''' )
if items[1].isdigit():
lowercase__ = int(items[1] )
else:
lowercase__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
lowercase__ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase__ = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , add_adapter=SCREAMING_SNAKE_CASE , adapter_stride=SCREAMING_SNAKE_CASE , adapter_kernel_size=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , output_hidden_size=SCREAMING_SNAKE_CASE , )
lowercase__ = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE )
# load model
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_0004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
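# Illustrative invocation (script name and all paths below are placeholders,
# not real checkpoints):
#   python convert_mbart_wav2vec2.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./converted-model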
| 711 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = DownBlockaD # noqa F405
_lowercase : str = '''down'''
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
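# Each block test below follows the same recipe: build the block via the shared
# tester mixin, run it on a fixed dummy input, and compare a slice of the output
# against the hard-coded golden values.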
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = ResnetDownsampleBlockaD # noqa F405
_lowercase : Any = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = AttnDownBlockaD # noqa F405
_lowercase : Any = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = AttnSkipDownBlockaD # noqa F405
_lowercase : Dict = '''down'''
@property
def lowerCamelCase_ ( self: int ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = AttnDownEncoderBlockaD # noqa F405
_lowercase : Union[str, Any] = '''down'''
@property
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : Dict = '''mid'''
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : int = '''mid'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = UpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = CrossAttnUpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = AttnUpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = SkipUpBlockaD # noqa F405
_lowercase : int = '''up'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = AttnSkipUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpDecoderBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
| 429 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ) -> Any:
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
lowerCAmelCase = f"{src_lang}-{tgt_lang}"
lowerCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=A__ , exist_ok=A__ )
lowerCAmelCase = os.path.join(A__ , "README.md" )
print(f"Generating {path}" )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(A__ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
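# The loop above writes one card per checkpoint, e.g.
# model_cards/allenai/wmt16-en-de-dist-12-1/README.md.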
| 649 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = ['pixel_values']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = size if size is not None else {"shortest_edge": 2_2_4}
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
lowerCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name="crop_size" )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase = do_convert_rgb
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Dict , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, float] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[int]:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : float = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Optional[int] , ) -> PIL.Image.Image:
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE , param_name="size" , default_to_square=SCREAMING_SNAKE_CASE )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE , param_name="crop_size" , default_to_square=SCREAMING_SNAKE_CASE )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase = [convert_to_rgb(SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
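# Minimal usage sketch (hypothetical: assumes this processor is exported under a
# readable name such as CLIPImageProcessor and that PIL is installed):
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above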
| 649 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowercase :Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def split_text( text : str , n : int=100 , character : str=" " ):
    '''simple docstring'''
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents( documents : dict ):
    '''simple docstring'''
    titles, texts = [], []
    for title, text in zip(documents["title"] , documents["text"] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else "" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast ):
    '''simple docstring'''
    input_ids = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt" )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("embeddings" , custom_index=index )
# And save the index
    index_path = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
    dataset.get_index("embeddings" ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
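    # HNSW with METRIC_INNER_PRODUCT approximates the maximum-inner-product search
    # used for DPR retrieval; `d` is the embedding dimension and `m` the number of
    # bi-directional links per node (see IndexHnswArguments below).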
@dataclass
class RagExampleArguments:
    """simple docstring"""
    csv_path : str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question : Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name : str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name : str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir : Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
    """simple docstring"""
    num_proc : Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size : int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class IndexHnswArguments:
    """simple docstring"""
    d : int = field(
        default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m : int = field(
        default=1_28 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure( config : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption( parser : str ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter : Tuple ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session : List[Any] , exitstatus : Dict ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
"""simple docstring"""
    def check_output( self : Dict , want : List[str] , got : Dict , optionflags : int ) ->Optional[Any]:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
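# With the flag registered above, an individual doctest can opt out of output
# comparison, e.g.:
#   >>> some_nondeterministic_call()  # doctest: +IGNORE_RESULT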
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 26 | 0 |
from __future__ import annotations
def bucket_sort( my_list : list ) ->list:
    '''simple docstring'''
    if len(my_list ) == 0:
        return []
    min_value, max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
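# One bucket per integer between min and max: roughly O(n + k) time but O(k)
# extra memory, where k = max_value - min_value + 1.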
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 201 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix( matrix ) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
    return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
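# For matrices larger than 3x3 a general-purpose routine is the usual choice,
# e.g. (sketch):
#   from numpy.linalg import inv
#   inverse = inv(array(matrix))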
| 205 | 0 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase :
'''simple docstring'''
def __init__( self) -> None:
"""simple docstring"""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def lowercase_ ( self) -> list[float]:
"""simple docstring"""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal , length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix) , np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i , 2) for i in final_signal]
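# Worked example: the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4]
# computed by this method is [10, 10, 6, 14].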
if __name__ == "__main__":
doctest.testmod()
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
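# The _LazyModule indirection above defers the heavy torch/TF/Flax imports until
# one of the model classes is actually accessed, keeping the package import cheap.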
| 41 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ) -> Tuple:
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs )
        return config
def __lowerCAmelCase ( self ) -> int:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __lowerCAmelCase ( self ) -> int:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def __lowerCAmelCase ( self ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def __lowerCAmelCase ( self ) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __lowerCAmelCase ( self ) -> Any:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_a = model(_UpperCAmelCase , _UpperCAmelCase )
_a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_a = output.prev_sample
_a = torch.sum(torch.abs(_UpperCAmelCase ) )
_a = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
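    # The test above exercises the canonical sampling loop: scale the sample for
    # the current timestep, run the dummy model, then let the scheduler take one
    # step; per-device tolerances absorb small numerical differences between backends.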
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type="v_prediction" )
_a = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_a = model(_UpperCAmelCase , _UpperCAmelCase )
_a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_a = output.prev_sample
_a = torch.sum(torch.abs(_UpperCAmelCase ) )
_a = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def __lowerCAmelCase ( self ) -> Dict:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_a = self.dummy_model()
_a = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_a = model(_UpperCAmelCase , _UpperCAmelCase )
_a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_a = output.prev_sample
_a = torch.sum(torch.abs(_UpperCAmelCase ) )
_a = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**_UpperCAmelCase , use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_a = self.dummy_model()
_a = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
_a = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_a = model(_UpperCAmelCase , _UpperCAmelCase )
_a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_a = output.prev_sample
_a = torch.sum(torch.abs(_UpperCAmelCase ) )
_a = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 131 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
"""simple docstring"""
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self : Optional[Any], parent : List[Any], batch_size : Optional[int]=1_3, seq_length : int=7, is_training : List[Any]=True, use_labels : Union[str, Any]=False, vocab_size : str=9_9, hidden_size : Union[str, Any]=3_2, num_hidden_layers : Any=2, num_attention_heads : Any=4, intermediate_size : List[Any]=3_7, hidden_dropout_prob : Dict=0.1, attention_probs_dropout_prob : List[str]=0.1, max_position_embeddings : Dict=2_0, eos_token_id : int=2, pad_token_id : Union[str, Any]=1, bos_token_id : List[str]=0, ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self : str ) -> Tuple:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self : Tuple, config : str, inputs_dict : int ) -> List[str]:
        """simple docstring"""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[Any]:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
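# The helper above fills in any masks the caller did not provide: attention masks
# derived from the pad token, and all-ones head masks sized to the config.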
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use the "old" tokenizer here because of a bug when downloading the new one
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 663 | 0 |
"""Convert RWKV checkpoints from the original BlinkDL format to the Hugging Face format."""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename the keys of an original RWKV state dict to the Transformers naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
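# A quick sanity check of the renaming above (illustrative only; the zero tensors are
# placeholders, and the expected names follow directly from the rules in the function):
#
#     demo = {
#         "emb.weight": torch.zeros(4, 2),
#         "blocks.0.ln0.weight": torch.zeros(2),
#         "blocks.1.att.time_mix_k": torch.zeros(1, 1, 2),
#         "head.weight": torch.zeros(4, 2),
#     }
#     sorted(convert_state_dict(demo))
#     # -> ['head.weight', 'rwkv.blocks.0.pre_ln.weight',
#     #     'rwkv.blocks.1.attention.time_mix_key', 'rwkv.embeddings.weight']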
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file, then convert the state dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split into shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up the shards (for some reason the files PyTorch saves take the same space as the whole state dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
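# For reference, `shard_checkpoint` (imported above from transformers.modeling_utils)
# returns a `(shards, index)` pair: `shards` maps shard file names to partial state
# dicts, and `index` is None when the whole model fits in a single shard. A hedged
# illustration (the default max shard size is an assumption about the library version):
#
#     shards, index = shard_checkpoint(small_state_dict)
#     list(shards)   # -> ['pytorch_model.bin'] for a small model; index is None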
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
UpperCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
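# Example invocation (the repo and file names below are illustrative assumptions,
# not values taken from this script):
#
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id BlinkDL/rwkv-4-pile-169m \
#         --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#         --output_dir ./rwkv-4-169m-hf \
#         --size 169M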
| 312 |
"""FNet model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
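# Minimal usage sketch (hedged: assumes transformers is installed and exposes
# FNetModel; the sizes are arbitrary illustration values):
#
#     from transformers import FNetConfig, FNetModel
#
#     config = FNetConfig(num_hidden_layers=2, hidden_size=128, intermediate_size=512)
#     model = FNetModel(config)  # randomly initialised model with the custom config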
| 312 | 1 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return a sentence naming the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths vars
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
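# A quick sanity check of zeller() against Python's own calendar (the sample date is
# arbitrary; `datetime` is already imported above):
#
#     >>> zeller("01-31-2010")
#     'Your date 01-31-2010, is a Sunday!'
#     >>> datetime.date(2010, 1, 31).strftime("%A")
#     'Sunday'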
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
UpperCAmelCase : Any = parser.parse_args()
zeller(args.date_input)
| 567 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exception raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exception raised during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The recorded split sizes don't match the expected split sizes."""
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Compute the file size and, optionally, the sha256 checksum of a given file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # read in 1 MiB chunks to keep memory usage bounded
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check whether `dataset_size` is small enough to fit in memory per the config limit."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
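# Hedged usage sketch: checksum a temporary file and run a round-trip verification.
# The file name and contents are arbitrary; this mirrors how `datasets` records and
# then verifies download checksums:
#
#     import tempfile
#
#     with tempfile.NamedTemporaryFile(delete=False) as tmp:
#         tmp.write(b"hello world")
#     recorded = {"some://url": get_size_checksum_dict(tmp.name)}
#     verify_checksums(expected_checksums=dict(recorded), recorded_checksums=recorded)
#     # passes silently; a mismatch would raise NonMatchingChecksumError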
| 389 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
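# A minimal usage sketch of the processor outside the test harness (hedged: the
# checkpoint names below are assumptions for illustration, and `images` stands for a
# list of PIL images such as the ones built by prepare_image_inputs above):
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     batch = processor(text=["a photo of a cat"], images=images, return_tensors="np")
#     # batch holds input_ids, token_type_ids, attention_mask and pixel_values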
| 705 |
"""Convert TAPAS checkpoints from the original TensorFlow repository to PyTorch."""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
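# Example invocation (the paths are illustrative assumptions; note that the vocab file
# is expected to sit next to the TF checkpoint, since the script derives its path from
# `tf_checkpoint_path[:-10] + "vocab.txt"`):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#         --tapas_config_file ./tapas_wtq/bert_config.json \
#         --pytorch_dump_path ./tapas-wtq-pytorch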
| 674 | 0 |