# Dataset-viewer schema (recovered from the page header): columns `code` (string),
# `code_codestyle` (int, 0-371), `style_context` (string), `style_context_codestyle`
# (int, 0-349), `label` (int, 0-1). The rows below are the code samples themselves.
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}

SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ALBERT tokenizer backed by HuggingFace's `tokenizers` library (SentencePiece/Unigram based)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is matched in the raw, non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # An ALBERT sequence has the format `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Segment ids are 0 for the first sequence (and its special tokens) and 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
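
# A minimal usage sketch of the fast ALBERT tokenizer above (assumes the `transformers`
# package is installed and the 'albert-base-v2' checkpoint is reachable; the library
# exposes this class as `transformers.AlbertTokenizerFast`).
from transformers import AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
enc = tok("first segment", "second segment")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])  # 0s for the first segment and its specials, then 1s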
# --- end of sample (code_codestyle: 15) ---
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
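
# A tiny worked example of `get_pairs`: for the symbol sequence ("l", "o", "w") it
# returns the adjacent pairs that the BPE loop below ranks against `bpe_ranks`.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}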
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Word-level BPE tokenizer for Speech2Text2. Without a merges file it can only decode."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges file provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        # Append the end-of-word marker so merges cannot cross word boundaries.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
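
# A small worked example of `convert_tokens_to_string`: "@@ " marks a word-internal
# boundary, so BPE pieces are glued back together when decoding. Only a vocab file is
# needed to construct the tokenizer, so we build a throwaway one here (this assumes
# the `transformers` package is installed).
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}, f)

tok = Speech2Text2Tokenizer(f.name)
assert tok.convert_tokens_to_string(["hel@@", "lo", "world"]) == "hello world"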
# --- end of sample (style_context_codestyle: 11, label: 0) ---
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
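
# One way to run a single check from the suite above without a full test session.
# The dotted module path is an assumption based on the transformers repo layout
# (tests/models/mobilebert/); adjust it to wherever this file actually lives.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    "tests.models.mobilebert.test_tokenization_mobilebert"
    ".MobileBERTTokenizationTest.test_wordpiece_tokenizer"
)
unittest.TextTestRunner(verbosity=2).run(suite)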
# --- end of sample (code_codestyle: 351) ---
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings for byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
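
# A quick sanity check of `bytes_to_unicode`: printable ASCII maps to itself, while
# non-printable bytes such as the space (0x20) are shifted into a private printable
# range (space becomes "Ġ"), so every byte has a visible, BPE-safe representation.
byte_map = bytes_to_unicode()
assert byte_map[ord("A")] == "A"
assert byte_map[ord(" ")] == "Ġ"
assert len(byte_map) == 256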
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
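
# A minimal usage sketch of the `_pad` hook above (assumes the `transformers` package
# is installed and the 'allenai/led-base-16384' files are reachable). Positions added
# by padding receive -1 in `global_attention_mask`, i.e. neither local nor global
# attention, while 0 means local and 1 means global attention.
from transformers import LEDTokenizer

led_tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = led_tok("a long document")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global on <s>
padded = led_tok.pad(enc, padding="max_length", max_length=16)
assert padded["global_attention_mask"][-1] == -1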
# --- end of sample (style_context_codestyle: 158, label: 0) ---
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
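
# A small worked example of `parse_unknown_args`: leftover `--flag value` pairs from
# `parse_known_args` are folded into a keyword dict that is passed to the command.
assert parse_unknown_args(["--name", "squad", "--save_infos", "true"]) == {
    "name": "squad",
    "save_infos": "true",
}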
# --- end of sample (code_codestyle: 83) ---
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError when the profile JSON cannot be located."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information scraped from the profile page."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
# --- end of sample (style_context_codestyle: 32, label: 0) ---
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub info using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
# --- end of sample (code_codestyle: 287) ---
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
# --- end of sample (style_context_codestyle: 287, label: 1) ---
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units, with the joule as the base unit."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
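
# A quick sanity check of `energy_conversion`: 1 kilowatthour is 3.6 megajoule, and
# every conversion goes through the joule base unit.
assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6
assert energy_conversion("joule", "kilojoule", 500) == 0.5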
# --- end of sample (code_codestyle: 52) ---
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
# --- end of sample (style_context_codestyle: 52, label: 1) ---
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset into its features and target values
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
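
# A minimal follow-up sketch (assumes scikit-learn and xgboost are installed): reuse
# the helpers above to train on an Iris split and classify one held-out sample.
iris = load_iris()
features, targets = data_handling(iris)
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
classifier = xgboost(x_train, y_train)
print(iris["target_names"][classifier.predict(x_test[:1])[0]])  # e.g. 'setosa'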
# --- end of sample (code_codestyle: 356) ---
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def create_rename_keys(state_dict: dict) -> list:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
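
# Example of the renaming performed above (hypothetical original key):
#   "network.0.0.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"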
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
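
# The allclose check above is the conversion sanity test: the HF logits for the
# first five ImageNet classes must match the reference values recorded from the
# original checkpoint in get_expected_output().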
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 26 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
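
    # Blenderbot, like RoBERTa, does not use token type ids, so the mask above is all zeros.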
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 57 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 57 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 229 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
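
    # e.g. [[["global", "local"], 12]] expands to ["global", "local", "global",
    # "local", ...] with 24 entries (the pattern repeated 12 times).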
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom block-length computation (avoids Python control flow) to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
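
# e.g. seq_length=256, window_size=256: the largest divisor of 256 smaller than
# 256 is 128, so this returns (block_length=128, num_blocks=2).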
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 20 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train") -> float:
    """Difference between predicted and actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple) -> float:
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m) -> float:
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index) -> float:
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
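
# index -1 selects the bias term parameter_vector[0], whose "feature" is
# implicitly 1, so its gradient reduces to the mean error.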
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 166 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
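
# The "l0" branch above is the hard-concrete stretch-and-clamp trick: sigmoid the
# mask scores, stretch to [l, r] = [-0.1, 1.1], then clamp back to [0, 1] so that
# many mask entries saturate to exactly 0 or 1.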
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
| 287 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
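
# key() is the standard weighted-A* priority: path cost so far plus the i-th
# (possibly inadmissible) heuristic inflated by W1.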
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
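
# Every improved neighbour is pushed into the anchor queue (index 0); when its
# inadmissible key stays within a factor W2 of its anchor key, it is also
# pushed into queue j.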
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))

    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 360 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
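
# preprocess() accepts a tensor, a single PIL image or a list of PIL images and
# returns a (batch, 3, 256, 256) tensor normalized to [-1, 1].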
# Class name follows the diffusers community "ddim_noise_comparative_analysis" example.
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
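
    # e.g. strength=0.8 with num_inference_steps=50 keeps the last 40 timesteps,
    # so denoising starts from a partially-noised latent instead of pure noise.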
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
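if __name__ == "__main__":
    # Minimal usage sketch (illustrative): the checkpoint id and the image path
    # are assumptions, not part of the pipeline above. Any unconditional
    # checkpoint exposing `unet` and `scheduler` components should work the
    # same way.
    import PIL.Image

    pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    init_image = PIL.Image.open("input.png").convert("RGB")  # hypothetical input file
    images, noising_timestep = pipe(image=init_image, strength=0.5, return_dict=False)
    images[0].save("output.png")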
| 14 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
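# Distilled from the integration test above: the core Attend-and-Excite call.
# `token_indices=[5, 7]` points at the prompt tokens "elephant" and "glasses"
# whose cross-attention the pipeline amplifies. Checkpoint, prompt and indices
# come from the test; step count and output path are illustrative assumptions.
# (This test module uses relative imports, so the sketch is kept as a comment.)
#
#     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
#     ).to("cuda")
#     out = pipe(
#         prompt="a painting of an elephant with glasses",
#         token_indices=[5, 7],
#         guidance_scale=7.5,
#         num_inference_steps=50,
#         generator=torch.manual_seed(51),
#     )
#     out.images[0].save("elephant_glasses.png")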
| 28 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
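# Illustrative usage of the config above. This module uses relative imports,
# so the example goes through the public package namespace:
#
#     from transformers import GLPNConfig
#
#     config = GLPNConfig(decoder_hidden_size=128)
#     print(config.model_type, config.decoder_hidden_size, len(config.depths))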
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
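# Illustrative usage of the two classes above: build a config and inspect the
# dynamic axes an ONNX export would use (`task` defaults to "default"):
#
#     config = Data2VecTextConfig()
#     onnx_config = Data2VecTextOnnxConfig(config)
#     print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {...}}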
| 177 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
__UpperCAmelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
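# Example invocation (the flag names and defaults come from the parser above;
# the script name and file paths are illustrative):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json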
| 84 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
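# Illustrative classification example for the model classes above, following
# the standard transformers image-classification workflow (the COCO image URL
# is the usual docs example; network access is assumed). The module itself uses
# relative imports, so the example goes through the public package namespace:
#
#     import requests
#     from PIL import Image
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])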
| 229 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so parse_yaml_file can read the JSON file too
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
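if __name__ == "__main__":
    # A minimal sketch of the pattern these tests exercise: declare a dataclass,
    # let HfArgumentParser generate the CLI, and parse into typed objects. The
    # dataclass below is illustrative, not part of the test suite.
    @dataclass
    class DemoArguments:
        foo: int = 42
        baz: str = field(default="toto", metadata={"help": "help message"})

    (demo,) = HfArgumentParser(DemoArguments).parse_args_into_dataclasses(["--foo", "7"])
    print(demo)  # DemoArguments(foo=7, baz='toto')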
| 364 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
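# Illustrative round trip through the feature extractor above, using a dummy
# one-second sine wave in place of real speech (waveform and sampling rate are
# assumptions for the demo; the module uses relative imports, so the sketch is
# kept as a comment):
#
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
#     inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")          # raw samples
#     targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")  # log-mel frames
#     print(inputs["input_values"].shape, targets["input_values"].shape)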
| 164 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
return (preds == labels).mean()
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
__lowercase =simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =fa_score(y_true=_lowerCAmelCase , y_pred=_lowerCAmelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
__lowercase =pearsonr(_lowerCAmelCase , _lowerCAmelCase )[0]
__lowercase =spearmanr(_lowerCAmelCase , _lowerCAmelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ), f"""Predictions and labels have mismatched lengths {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "mrpc":
return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "qqp":
return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(_lowerCAmelCase )
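# Illustrative call pattern for the metrics above. Note: in this dump
# every helper was renamed to `_A` (and `f1_score` to `fa_score`), so the
# internal cross-references only resolve with the original names
# (simple_accuracy, acc_and_f1, glue_compute_metrics). The underlying
# computation, stated directly:
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()  # what simple_accuracy returns
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})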
| 166 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """mgp-str"""
def __init__( self : int , _lowerCAmelCase : str=[3_2, 1_2_8] , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : int=3 , _lowerCAmelCase : str=2_7 , _lowerCAmelCase : List[str]=3_8 , _lowerCAmelCase : Tuple=5_0_2_5_7 , _lowerCAmelCase : str=3_0_5_2_2 , _lowerCAmelCase : Optional[int]=7_6_8 , _lowerCAmelCase : Optional[int]=1_2 , _lowerCAmelCase : Optional[Any]=1_2 , _lowerCAmelCase : Optional[int]=4.0 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : List[Any]=1e-5 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : str=False , _lowerCAmelCase : List[Any]=0.02 , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase)
__lowercase =image_size
__lowercase =patch_size
__lowercase =num_channels
__lowercase =max_token_length
__lowercase =num_character_labels
__lowercase =num_bpe_labels
__lowercase =num_wordpiece_labels
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =mlp_ratio
__lowercase =distilled
__lowercase =layer_norm_eps
__lowercase =drop_rate
__lowercase =qkv_bias
__lowercase =attn_drop_rate
__lowercase =drop_path_rate
__lowercase =output_aa_attentions
__lowercase =initializer_range
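# Usage sketch: the class above is the renamed MgpstrConfig, so with a
# transformers release that ships MGP-STR the canonical construction is:
from transformers import MgpstrConfig

config = MgpstrConfig(image_size=[32, 128], patch_size=4, max_token_length=27)
print(config.hidden_size)  # 768 by default, per the __init__ defaults above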
| 166 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowercase :
def __snake_case( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
return None
class lowercase :
def __snake_case( self : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
return None
class lowercase ( unittest.TestCase ):
lowercase__ : Union[str, Any] = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_UpperCamelCase , "tf" , 12 , **_UpperCamelCase )
@require_torch
@slow
def __snake_case( self : List[str] ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_UpperCamelCase , "pt" , 12 , **_UpperCamelCase )
@require_torch
@slow
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(_UpperCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(_UpperCamelCase ) ) )
model.save_pretrained(_UpperCamelCase )
self._test_export(_UpperCamelCase , "pt" , 12 , _UpperCamelCase )
@require_tf
@slow
def __snake_case( self : Any ) -> Dict:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(_UpperCamelCase , "tf" , 12 , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = quantize(Path(_UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_UpperCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(_UpperCamelCase , "pt" , 12 , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = quantize(_UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_UpperCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple=None , **_UpperCamelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(_UpperCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
return path
except Exception as e:
self.fail(_UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
from transformers import BertModel
SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_UpperCamelCase , _UpperCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def __snake_case( self : Any ) -> Union[str, Any]:
'''simple docstring'''
from transformers import TFBertModel
SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_UpperCamelCase , _UpperCamelCase , "tf" )
def __snake_case( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(_UpperCamelCase , _UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , _UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] , _UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def __snake_case( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , _UpperCamelCase , _UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(_UpperCamelCase ) , set(_UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_UpperCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , _UpperCamelCase , _UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_UpperCamelCase ) , 1 )
self.assertEqual(len(_UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 206 |
from random import randint, random
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 5 , ):
SCREAMING_SNAKE_CASE = [[-1] * number_of_cells] # Create a highway without any car
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , 0 )
while i < number_of_cells:
SCREAMING_SNAKE_CASE = (
randint(0 , UpperCAmelCase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = highway_now[car_index + 1 :]
for cell in range(len(UpperCAmelCase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(UpperCAmelCase__ , -1 )
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
    # Before the calculations, the next highway is empty
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
SCREAMING_SNAKE_CASE = min(highway_now[car_index] + 1 , UpperCAmelCase__ )
            # Number of empty cells before the next car
SCREAMING_SNAKE_CASE = get_distance(UpperCAmelCase__ , UpperCAmelCase__ ) - 1
# We can't have the car causing an accident
SCREAMING_SNAKE_CASE = min(next_highway[car_index] , UpperCAmelCase__ )
if random() < probability:
# Randomly, a driver will slow down
SCREAMING_SNAKE_CASE = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = len(highway[0] )
for i in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = update(highway[i] , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
SCREAMING_SNAKE_CASE = (car_index + speed) % number_of_cells
# Commit the change of position
SCREAMING_SNAKE_CASE = speed
highway.append(UpperCAmelCase__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
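# The four helpers above all collapsed to one name in this dump, so here
# is a compact, self-contained restatement of the Nagel-Schreckenberg
# update rule they implement (speed -1 marks an empty cell; the road
# wraps around). Names are illustrative.
from random import random

def step(road, probability, max_speed):
    n = len(road)
    nxt = [-1] * n
    for i, speed in enumerate(road):
        if speed == -1:
            continue
        speed = min(speed + 1, max_speed)        # accelerate
        gap = 0
        while road[(i + gap + 1) % n] == -1:     # empty cells to next car
            gap += 1
        speed = min(speed, gap)                  # avoid collision
        if random() < probability:
            speed = max(speed - 1, 0)            # random slowdown
        nxt[(i + speed) % n] = speed             # move the car
    return nxt

print(step([0, -1, -1, 2, -1, -1, -1, 1], probability=0.1, max_speed=5))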
| 206 | 1 |
"""simple docstring"""
from math import factorial
A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def _snake_case ( lowerCamelCase__ : List[Any] ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowercase_ ) )
def _snake_case ( lowerCamelCase__ : str = 60 , lowerCamelCase__ : Tuple = 1_000_000 ) -> int:
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(lowercase_ , lowercase_ ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
lowerCamelCase_ : Any =0
# the cached sizes of the previous chains
lowerCamelCase_ : Dict ={}
for start_chain_element in range(1 , lowercase_ ):
# The temporary set will contain the elements of the chain
lowerCamelCase_ : Any =set()
lowerCamelCase_ : Optional[Any] =0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCamelCase_ : List[str] =start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowercase_ )
chain_set_length += 1
lowerCamelCase_ : Union[str, Any] =digit_factorial_sum(lowercase_ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase_ : List[Any] =chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution()}')
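# Compact, runnable restatement of the count above (the dump collapsed
# DIGIT_FACTORIAL and both helpers onto shared names). It counts starting
# numbers below `limit` whose factorial-digit chain has exactly
# `chain_length` non-repeating terms (Project Euler 74); the memoisation
# of the original is omitted for clarity, so use small demo values.
from math import factorial

FACT = {str(d): factorial(d) for d in range(10)}

def digit_factorial_sum(n: int) -> int:
    return sum(FACT[d] for d in str(n))

def count_chains_of_length(chain_length: int = 60, limit: int = 1_000_000) -> int:
    count = 0
    for start in range(1, limit):
        seen, n = set(), start
        while n not in seen and len(seen) <= chain_length:
            seen.add(n)
            n = digit_factorial_sum(n)
        if len(seen) == chain_length:
            count += 1
    return count

print(count_chains_of_length(chain_length=10, limit=1_000))  # small demo values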
| 144 |
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
UpperCAmelCase__ = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
elif text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if audio_target is not None:
A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_values''']
elif text_target is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
A__ = kwargs.pop('''labels''' , UpperCAmelCase__)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
elif input_ids is not None:
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = self.feature_extractor.feature_size
A__ = self.feature_extractor.num_mel_bins
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
A__ = feature_size_hack
A__ = targets['''input_values''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
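# Usage sketch for the processor above (it is the renamed
# SpeechT5Processor; requires `transformers` with audio extras and
# downloads the public microsoft/speecht5_tts checkpoint files):
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, world.", return_tensors="pt")
print(inputs["input_ids"].shape)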
| 14 | 0 |
"""simple docstring"""
def twos_complement(number: int) -> str:
    """
    Return the two's-complement binary string of a non-positive integer,
    using the minimum number of magnitude bits plus a leading sign bit.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
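# Quick value checks for twos_complement above (minimum width plus a
# leading sign bit): -5 in four bits is 1011, -1 in two bits is 11.
assert twos_complement(-5) == "0b1011"
assert twos_complement(-1) == "0b11"
assert twos_complement(0) == "0b0"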
| 352 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A : Tuple = parser.parse_args()
if args.model_type == "bert":
A : Dict = BertForMaskedLM.from_pretrained(args.model_name)
A : List[str] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
A : Optional[Any] = model.state_dict()
A : int = {}
for w in ["word_embeddings", "position_embeddings"]:
A : str = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
A : Any = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
A : Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
A : int = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
A : List[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
A : List[str] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
A : Union[str, Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
A : List[str] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
A : int = state_dict['''cls.predictions.decoder.weight''']
A : str = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[F'''cls.predictions.transform.dense.{w}''']
A : List[str] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
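# Invocation sketch (the script filename is illustrative; the flags are
# the ones defined by the argparse block above):
#   python extract_for_distillation.py --model_type bert \
#       --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#       --vocab_transform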
| 276 | 0 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Project Euler 99: return the 1-based line number in `data_file` (pairs
    "base,exponent") holding the greatest value base**exponent. The powers
    are far too large to evaluate, so exponent * log10(base) is compared
    instead.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
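# Why logarithms work here: base**exp overflows long before the file is
# exhausted, but log10 is monotonic, so a1**x1 > a2**x2 exactly when
# x1*log10(a1) > x2*log10(a2). For example, 2**11 = 2048 vs 3**7 = 2187:
from math import log10

print(11 * log10(2) < 7 * log10(3))  # True -> 3**7 is the larger power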
| 29 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: List[str] = '''A painting of a squirrel eating a burger '''
lowercase__: str = torch.manual_seed(0 )
lowercase__: Union[str, Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = generator.manual_seed(0 )
lowercase__: List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Tuple = '''A painting of a squirrel eating a burger '''
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
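# The pipeline under test, reduced to its essential call (left as a
# comment sketch: it requires `diffusers`, a multi-gigabyte checkpoint
# download, and ideally a GPU):
# from diffusers import VersatileDiffusionTextToImagePipeline
# pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=25).images[0]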
| 177 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase__ ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
a__: List[Any] =1_0
def _lowerCamelCase ( self : Tuple ):
a__: List[str] =[1, 2, 3, 4]
a__: Tuple =[1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def _lowerCamelCase ( self : int ):
a__: List[str] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
a__: Optional[Any] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def _lowerCamelCase ( self : int ):
a__: List[str] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
a__: Any =[1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def _lowerCamelCase ( self : Union[str, Any] ):
a__: Dict ="It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
a__ , a__: Any =process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
def _lowerCamelCase ( self : Optional[Any] ):
a__: List[Any] =""
a__ , a__: List[str] =process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
self.assertEqual(__lowerCAmelCase , [] )
def _lowerCamelCase ( self : Optional[Any] ):
a__: str =(
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
a__ , a__: List[Any] =process_story(__lowerCAmelCase )
a__: Union[str, Any] =[
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
a__: Dict =["It was the best of times."]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self : Any ):
a__: int =torch.tensor([1, 2, 3, 4] )
a__: Optional[int] =torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self : List[Any] ):
a__: List[str] =torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
a__: List[str] =torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 2_3 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self : Any ):
a__: Optional[Any] =torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a__: Any =torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self : Optional[Any] ):
a__: Any =1_0_1
a__: List[Any] =torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
a__: Optional[int] =torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a__: str =compute_token_type_ids(__lowerCAmelCase , __lowerCAmelCase )
np.testing.assert_array_equal(__lowerCAmelCase , __lowerCAmelCase )
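# Minimal, self-contained restatement of the two helpers these tests
# cover (the real utils_summarization module may differ in edge cases;
# requires torch):
import torch

def truncate_or_pad(seq, block_size, pad_token_id):
    # clip to block_size, then right-pad with pad_token_id
    seq = seq[:block_size]
    return seq + [pad_token_id] * (block_size - len(seq))

def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad token sits
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask

print(truncate_or_pad([1, 2, 3], 5, 0))           # [1, 2, 3, 0, 0]
print(build_mask(torch.tensor([1, 2, 0, 0]), 0))  # tensor([1, 1, 0, 0])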
| 358 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCAmelCase = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ):
a__: int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ):
a__: Any =random.randint(0 , len(__magic_name__ ) - 1 )
a__: Tuple =parent_a[:random_slice] + parent_a[random_slice:]
a__: List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] ):
a__: str =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
a__: Union[str, Any] =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def __lowerCamelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ):
a__: List[Any] =[]
# Generate more children proportionally to the fitness score.
a__: Dict =int(parent_a[1] * 100 ) + 1
a__: Tuple =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
a__: List[str] =population_score[random.randint(0 , __magic_name__ )][0]
a__ , a__: Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
a__: Any =F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside genes variable.
a__: int =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
a__: str =F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(__magic_name__ )
# Generate random starting population.
a__: Tuple =[]
for _ in range(__magic_name__ ):
population.append("".join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithms is doing.
a__ , a__: Any =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
a__: Dict =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
a__: Any =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
a__: Optional[int] =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
a__: List[str] =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is selection
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCAmelCase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCAmelCase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
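# The crossover helper above lost its second parent to the renaming pass
# (both slices read from parent_a); the intended one-point crossover,
# with illustrative names, is:
import random

def crossover(parent_1: str, parent_2: str):
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])

print(crossover("AAAA", "BBBB"))  # e.g. ('ABBB', 'BAAA') for cut=1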
| 42 | 0 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
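# Sanity checks: solution(10) == 2520 is the classic small case, and the
# full 1..20 range gives 232792560.
assert solution(10) == 2520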
| 33 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A = "sshleifer/mar_enro_6_3_student"
class A ( __UpperCAmelCase ):
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
lowercase__ = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase__ , )
lowercase__ = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def A__ ( self ) -> str:
'''simple docstring'''
MarianMTModel.from_pretrained(lowerCamelCase__ )
@slow
@require_torch_gpu
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
lowercase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase__ = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase__ = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationModule.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
lowercase__ = main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class A ( __UpperCAmelCase ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
lowercase__ = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
lowercase__ = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
lowercase__ = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = bash_script.replace("""--fp16""" , """""" )
lowercase__ = 6
lowercase__ = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationDistiller.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase__ = distill_main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
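# Rough shell equivalent of what these tests drive via sys.argv patching
# (flag names as they appear in the args above; paths are illustrative):
#   python finetune.py --output_dir /tmp/out \
#       --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler \
#       --do_predict --gpus 1 --n_train 40000 --n_val 500 --n_test 500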
| 164 | 0 |
import argparse
import json
import subprocess
def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: List[Any] ) -> Optional[Any]:
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : str = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
UpperCamelCase__ : Tuple = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
UpperCamelCase__ : Union[str, Any] = output.stdout.decode('''utf-8''' )
UpperCamelCase__ : int = json.loads(__UpperCAmelCase )
UpperCamelCase__ : str = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
UpperCamelCase__ : Tuple = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] ) -> Dict:
return values.split(''',''' )
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
UpperCAmelCase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
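# Invocation sketch (the script name is illustrative; the token needs the
# actions:read scope on the repository):
#   python get_runner_status.py --target_runners runner-a,runner-b --token $GH_TOKEN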
| 366 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> Optional[Any]:
UpperCamelCase__ : Any = test_results.split(''' ''' )
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : int = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCamelCase__ : List[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
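# Example of the pytest summary shape parsed above; with the helper's
# name lost to the renaming pass, the same parse inline: the int token
# preceding each "failed"/"passed" word is accumulated.
_expr = "2 failed, 11 passed in 103.41s ==".split(" ")
print(sum(int(_expr[i - 1]) for i, e in enumerate(_expr) if "failed" in e),
      sum(int(_expr[i - 1]) for i, e in enumerate(_expr) if "passed" in e))  # 2 11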
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] ) -> Tuple:
UpperCamelCase__ : List[Any] = {}
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : int = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , __UpperCAmelCase ):
UpperCamelCase__ : Any = True
UpperCamelCase__ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
UpperCamelCase__ : List[Any] = line
UpperCamelCase__ : List[Any] = False
return failures
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Dict = title
UpperCamelCase__ : Tuple = doc_test_results['''time_spent'''].split(''',''' )[0]
UpperCamelCase__ : Optional[Any] = doc_test_results['''success''']
UpperCamelCase__ : str = doc_test_results['''failures''']
UpperCamelCase__ : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCamelCase__ : List[Any] = doc_test_results
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] = [self._time_spent]
UpperCamelCase__ : str = 0
for time in time_spent:
UpperCamelCase__ : List[Any] = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__magic_name__ ) == 1:
UpperCamelCase__ : List[Any] = [0, 0, time_parts[0]]
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(__magic_name__ )}h{int(__magic_name__ )}m{int(__magic_name__ )}s"
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = 40
UpperCamelCase__ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__magic_name__, __magic_name__ )}
UpperCamelCase__ : List[str] = ''''''
for category, failures in category_failures.items():
if len(__magic_name__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__magic_name__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(__magic_name__ )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=__magic_name__, )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
UpperCamelCase__ : List[str] = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
UpperCamelCase__ : Optional[Any] = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=__magic_name__, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ''''''
for key, value in failures.items():
UpperCamelCase__ : List[Any] = value[:200] + ''' [Truncated]''' if len(__magic_name__ ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
UpperCamelCase__ : Union[str, Any] = job_name
UpperCamelCase__ : Any = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
UpperCamelCase__ : Union[str, Any] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
UpperCamelCase__ : Optional[int] = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
UpperCamelCase__ : Optional[int] = sorted(self.doc_test_results.items(), key=lambda __magic_name__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
UpperCamelCase__ : Any = f"*Num failures* :{len(job_result['failed'] )} \n"
UpperCamelCase__ : Optional[Any] = job_result['''failures''']
UpperCamelCase__ : Optional[Any] = self.get_reply_blocks(__magic_name__, __magic_name__, __magic_name__, text=__magic_name__ )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=f"Results for {job}", blocks=__magic_name__, thread_ts=self.thread_ts['''ts'''], )
time.sleep(1 )
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase__ : Any = os.environ['''GITHUB_RUN_ID''']
UpperCamelCase__ : Tuple = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
UpperCamelCase__ : Optional[int] = requests.get(__UpperCAmelCase ).json()
UpperCamelCase__ : List[Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
UpperCamelCase__ : List[Any] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
UpperCamelCase__ : Any = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , __UpperCAmelCase )
return {}
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> List[Any]:
UpperCamelCase__ : Optional[int] = {}
if os.path.exists(__UpperCAmelCase ):
UpperCamelCase__ : Dict = os.listdir(__UpperCAmelCase )
for file in files:
try:
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , encoding='''utf-8''' ) as f:
UpperCamelCase__ : int = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(__UpperCAmelCase , __UpperCAmelCase )}." ) from e
return _artifact
def lowerCAmelCase_ ( ) -> str:
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = name
UpperCamelCase__ : int = []
def __str__( self ) -> Tuple:
"""simple docstring"""
return self.name
def UpperCamelCase__ ( self, __magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
self.paths.append({'''name''': self.name, '''path''': path} )
UpperCamelCase__ : Dict[str, Artifact] = {}
UpperCamelCase__ : Union[str, Any] = filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCamelCase__ : Optional[int] = directory
if artifact_name not in _available_artifacts:
UpperCamelCase__ : Union[str, Any] = Artifact(__UpperCAmelCase )
_available_artifacts[artifact_name].add_path(__UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase_ = get_job_links()
UpperCAmelCase_ = retrieve_available_artifacts()
UpperCAmelCase_ = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase_ = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase_ = github_actions_job_links.get('run_doctests')
UpperCAmelCase_ = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
UpperCAmelCase_ = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = handle_test_results(artifact['stats'])
UpperCAmelCase_ = failed
UpperCAmelCase_ = success
UpperCAmelCase_ = time_spent[1:-1] + ', '
UpperCAmelCase_ = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
UpperCAmelCase_ = line.replace('FAILED ', '')
UpperCAmelCase_ = line.split()[0].replace('\n', '')
if "::" in line:
UpperCAmelCase_ , UpperCAmelCase_ = line.split('::')
else:
UpperCAmelCase_ , UpperCAmelCase_ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase_ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase_ = all_failures[test] if test in all_failures else 'N/A'
UpperCAmelCase_ = failure
break
UpperCAmelCase_ = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 247 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :str = logging.get_logger(__name__)
lowerCamelCase :Optional[int] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    model_type = 'pix2struct_vision_model'

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1E-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1E-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    model_type = 'pix2struct'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""
Convolution neural network for photo recognition, with forward and
backward propagation implemented with numpy only.
"""
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [size, number, step] of the convolution kernels
        # size_p1: pooling size; bp_num1/2/3: units of the three BP layers
        # rate_w / rate_t: learning rates for weights and thresholds
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the pooling layer back to the feature maps
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
    pass
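# Hedged smoke test (assumptions: 20x20 single-channel inputs; with a 3x3
# kernel, step 1 and 2x2 pooling, the flattened size is 2 * 9 * 9 = 162,
# which must equal bp_num1 -- a sketch, not a verified training recipe):
def _demo_cnn_smoke_test():
    cnn = CNN([3, 2, 1], 2, 162, 20, 10, rate_w=0.2, rate_t=0.2)
    image = np.random.rand(20, 20)
    teach = np.zeros(10)
    teach[3] = 1
    cnn.train(1, [image], [teach], n_repeat=2, error_accuracy=0.1, draw_e=False)
    return cnn.predict([image])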
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some tests of test_modeling_tf_common.py, as ResNet
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
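# Hedged usage sketch (assumption: network access to the Hub checkpoint;
# this mirrors the flow of the integration test above):
def _demo_tf_resnet_inference():
    image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs).logits
    return int(tf.math.argmax(logits, axis=-1)[0])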
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace('emb', 'model.decoder.embed_tokens')
    if "transformer" in name:
        name = name.replace('transformer', 'model.decoder')
    if "cross_attention" in name:
        name = name.replace('cross_attention', 'encoder_attn')
    if "linear1" in name:
        name = name.replace('linear1', 'fc1')
    if "linear2" in name:
        name = name.replace('linear2', 'fc2')
    if "norm1" in name:
        name = name.replace('norm1', 'self_attn_layer_norm')
    if "norm_cross" in name:
        name = name.replace('norm_cross', 'encoder_attn_layer_norm')
    if "norm2" in name:
        name = name.replace('norm2', 'final_layer_norm')
    if "out_norm" in name:
        name = name.replace('out_norm', 'model.decoder.layer_norm')
    if "linears" in name:
        name = name.replace('linears', 'lm_heads')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj', 'enc_to_dec_proj')
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = TaEncoderModel.from_pretrained('t5-base')
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz')
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""")
    if len(unexpected_keys) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits')
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz', padding_side='left')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
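# Hedged example invocation (assumptions: the script is saved as
# convert_musicgen.py and `audiocraft` plus the MusicGen weights are
# downloadable; the flags match the parser defined above):
#
#   python convert_musicgen.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu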
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-4], 'œ')
        self.assertEqual(vocab_keys[-2], '<mask>')
        self.assertEqual(vocab_keys[-1], '<ctc_blank>')
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences,)
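# Hedged usage sketch (assumptions: run from the repo root so the fixture
# path resolves; outside the test suite you would typically load the
# "microsoft/speecht5_asr" checkpoint instead):
def _demo_speechta_tokenizer_roundtrip():
    tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
    ids = tokenizer.encode("this is a test", add_special_tokens=False)
    return tokenizer.decode(ids)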
"""Tests for reading and writing `datasets` Datasets from/to SQL databases."""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] ,)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
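# Hedged usage sketch (assumptions: a `data.db` file with a `dataset` table;
# `Dataset.from_sql`/`to_sql` are the public entry points backed by the
# SqlDatasetReader/SqlDatasetWriter classes exercised above):
def _demo_sql_roundtrip():
    dset = Dataset.from_sql("dataset", "sqlite:///data.db")
    dset.to_sql("dataset_copy", "sqlite:///data_copy.db")
    return dset.column_names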
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 353 |
"""simple docstring"""
def _A ( _a : str , _a : str ):
"""simple docstring"""
A = len(_a ) + 1
A = len(_a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
A = [[0 for i in range(_a )] for j in range(_a )]
# since string of zero length match pattern of zero length
A = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _a ):
A = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _a ):
A = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _a ):
for j in range(1 , _a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
A = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
A = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
A = dp[i - 1][j]
else:
A = 0
else:
A = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
UpperCAmelCase ="aab"
UpperCAmelCase ="c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 77 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a real-valued Gabor kernel of size ksize x ksize."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
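# Note (added for clarity; this only restates what gabor_filter_kernel
# computes above): the kernel is the real part of the standard Gabor function
#   g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2))
#             * cos(2 * pi * x' / lambd + psi)
# where x' = x*cos(theta) + y*sin(theta) and y' = -x*sin(theta) + y*cos(theta)
# are coordinates rotated by theta and measured from the kernel center.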
| 29 |
"""Convert SEW checkpoint."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
        config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
        config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
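# Example invocation (illustrative only; the script filename and both paths
# below are placeholders, not taken from the source):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew-mid-100k.pt \
#       --pytorch_dump_folder_path ./sew-hf-checkpoint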
| 42 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    # rename the patch-embedding weights of stage `idx`
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    # rename the attention-block weights of block `cnt` in stage `idx`
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    # rename the cls token of stage `idx`
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token") )
return token
def final():
    # rename the final norm and classifier head weights
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Convert the original microsoft CvT checkpoint to a Hugging Face checkpoint.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__A = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
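# Note (added for clarity): the [4:6] slice in convert_cvt_checkpoint reads
# the two characters after the "cvt-" prefix of the checkpoint name, so
# "cvt-13" -> "13" and "cvt-21" -> "21", while a wide model name such as
# "cvt-w24" yields "w2" and falls through to the else branch with depths
# [2, 2, 20].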
| 108 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix()):
                        yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
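if __name__ == "__main__":
    # Minimal smoke test (illustrative addition; "squad" and the version
    # string are placeholder values, not taken from the source). It only
    # exercises path construction, no download happens.
    mock_dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
    print(mock_dl_manager.dummy_data_folder)  # dummy/1.0.0 (posix paths shown)
    print(mock_dl_manager.dummy_zip_file)  # dummy/1.0.0/dummy_data.zip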
| 108 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
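# Note (added for clarity): with the registration above, importing this
# package stays cheap; _LazyModule resolves a submodule only when one of the
# exported names (e.g. BlenderbotForConditionalGeneration) is first accessed,
# so the torch/TF/Flax backends are not imported until actually needed.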
| 259 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
SCREAMING_SNAKE_CASE = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to log verbose messages or not.'''}, )
lowercase__ = field(
default=2.0, metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
lowercase__ = field(
default=0.5, metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
lowercase__ = field(
default=0.99_99_95, metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to use for pretraining.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares the
    randomly masked time indices used for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}""",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
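# Note on the gumbel schedule used by Wav2Vec2PreTrainer above: the
# temperature follows max(max_gumbel_temp * gumbel_temp_decay**step,
# min_gumbel_temp). With the defaults declared in ModelArguments
# (2.0, 0.5, 0.999995), this gives 2.0 at step 0, roughly 1.21 after 100k
# updates, and clamps at the 0.5 floor around one million updates.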
| 247 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
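if __name__ == "__main__":
    # Round-trip demo (illustrative addition; key chosen arbitrarily). Note
    # the "X" that prepare_input inserts between the repeated letters of
    # "tree" survives decoding.
    key = "playfair example"
    ciphertext = encode("hide the gold in the tree stump", key)
    print(ciphertext)
    print(decode(ciphertext, key))  # HIDETHEGOLDINTHETREXESTUMP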
| 367 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide and conquer: find the maximum of nums between the inclusive
    indices left and right.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
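    # Illustrative call (added example): left and right are inclusive indices.
    nums = [3, 1, 4, 1, 5, 9, 2, 6]
    print(find_max(nums, 0, len(nums) - 1))  # 9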
| 82 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowerCamelCase ( self : List[str] ):
snake_case__ : str = FlaxBeitModelTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowerCamelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int] ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(snake_case_ )
snake_case__ : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : List[str] ):
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : str = self._prepare_for_class(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ : List[Any] , **snake_case_ : Tuple ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest("""JIT Enabled""" ):
snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self : str ):
snake_case__ : Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=snake_case_ , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
snake_case__ : Any = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
snake_case__ : Dict = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
snake_case__ : List[str] = outputs.logits
# verify the logits
snake_case__ : Dict = (1, 196, 8_192)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Any = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
snake_case__ : Optional[Any] = self.default_image_processor
snake_case__ : Union[str, Any] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )
# forward pass
snake_case__ : List[Any] = model(**snake_case_ )
snake_case__ : Optional[Any] = outputs.logits
# verify the logits
snake_case__ : str = (1, 1_000)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Any = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
snake_case__ : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def lowerCamelCase ( self : Dict ):
snake_case__ : List[str] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
snake_case__ : Optional[int] = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )
# forward pass
snake_case__ : Optional[int] = model(**snake_case_ )
snake_case__ : int = outputs.logits
# verify the logits
snake_case__ : int = (1, 21_841)
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : Optional[int] = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
snake_case__ : Optional[int] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
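
# Added usage sketch (hedged): the classification flow the slow tests above
# exercise, written as a plain function. It assumes the Flax extras of
# `transformers` are installed; the checkpoint, processor and fixture image
# are the ones the tests in this file already use.
def example_beit_classification():
    image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = image_processor(images=prepare_img(), return_tensors="np")
    logits = model(**inputs).logits
    return logits.argmax(-1).item()  # 281 per the integration test above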
| 35 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    lock_a = FileLock(str(tmpdir / """foo.lock""" ) )
    lock_b = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout


def test_long_filenames_are_truncated(tmpdir):
    # filelock shortens very long lock-file names so the basename stays
    # within the usual 255-character filesystem limit
    filename = """a""" * 1_000 + """.lock"""
    lock_a = FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith(""".lock""" )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
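
# Added usage sketch of the lock under test: one FileLock per path gives an
# exclusive section; a second acquirer blocks, and raises Timeout once the
# given timeout elapses (the behaviour the first test above asserts).
def example_usage(path="foo.lock"):
    lock = FileLock(path)
    with lock.acquire(timeout=1):
        pass  # exclusive section; concurrent FileLock(path) holders wait or time out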
| 35 | 1 |
"""simple docstring"""
def apply_table(inp, table):
    """Apply a permutation table: output position i is bit table[i] - 1 of the input."""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: the outer bits select the row, the inner bits the column."""
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sb, key, message):
    """One Feistel round of simplified DES (uses the module-level p4_table)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
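
# Added sanity examples for the helpers above (inputs are arbitrary bit-strings,
# not official test vectors): apply_table permutes by 1-indexed positions,
# left_shift rotates one place, and xor compares position by position.
def _primitive_examples() -> None:
    assert apply_table("10100000", [2, 6, 3, 1, 4, 8, 5, 7]) == "00110000"
    assert left_shift("1101") == "1011"
    assert xor("1010", "0110") == "1100"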
if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    p8_table = [6, 3, 7, 4, 8, 5, 1_0, 9]
    p10_table = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption (same rounds, keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
| 363 |
"""simple docstring"""
def sylvester(number: int) -> int:
    '''Return the number at position `number` in Sylvester's sequence.'''
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
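    # Added illustration: the first five terms match the known opening of
    # Sylvester's sequence.
    print([sylvester(n) for n in range(1, 6)])  # [2, 3, 7, 43, 1807]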
| 53 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase ( lowerCAmelCase__ : List[str] ) -> str:
__a = botoa.client('''iam''' )
__a = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCAmelCase__ , AssumeRolePolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) )
__a = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCAmelCase__ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
__a = botoa.client('''iam''' )
return iam_client.get_role(RoleName=lowerCAmelCase__ )["Role"]["Arn"]
def lowercase ( ) -> List[Any]:
__a = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , lowerCAmelCase__ , )
__a = None
if credentials_configuration == 0:
__a = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__a = aws_profile
else:
print(
            '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__a = _ask_field('''AWS Access Key ID: ''' )
__a = aws_access_key_id
__a = _ask_field('''AWS Secret Access Key: ''' )
__a = aws_secret_access_key
__a = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__a = aws_region
__a = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , lowerCAmelCase__ , )
if role_management == 0:
__a = _ask_field('''Enter your IAM role name: ''' )
else:
__a = '''accelerate_sagemaker_execution_role'''
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowerCAmelCase__ )
__a = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_custom_docker_image:
__a = _ask_field('''Enter your Docker image: ''' , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() )
__a = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_sagemaker_inputs_enabled:
__a = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_sagemaker_metrics_enabled:
__a = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__a = {}
__a = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__a = '''dynamo_'''
__a = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__a = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__a = _ask_options(
'''Which mode do you want to use?''' , lowerCAmelCase__ , lambda lowerCAmelCase__ : TORCH_DYNAMO_MODES[int(lowerCAmelCase__ )] , default='''default''' , )
__a = _ask_field(
            '''Do you want the fullgraph mode or is it OK to break the model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
__a = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message='''Please enter yes or no.''' , )
__a = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
__a = _ask_options(
lowerCAmelCase__ , lowerCAmelCase__ , lambda lowerCAmelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCAmelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__a = _ask_field(lowerCAmelCase__ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , default='''ml.p3.2xlarge''' )
__a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__a = _ask_field(
            '''How many machines do you want to use? [1]: ''' , lowerCAmelCase__ , default=1 , )
__a = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=lowerCAmelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCAmelCase__ , use_cpu=lowerCAmelCase__ , dynamo_config=lowerCAmelCase__ , eca_instance_type=lowerCAmelCase__ , profile=lowerCAmelCase__ , region=lowerCAmelCase__ , iam_role_name=lowerCAmelCase__ , mixed_precision=lowerCAmelCase__ , num_machines=lowerCAmelCase__ , sagemaker_inputs_file=lowerCAmelCase__ , sagemaker_metrics_file=lowerCAmelCase__ , )
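
# Hedged sketch (added): the questionnaire above only gathers values for a
# SageMakerConfig, so an equivalent config can be built non-interactively.
# The keyword names mirror the return statement above (note this file spells
# the instance-type field `eca_instance_type`); the values are examples only.
def example_static_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        eca_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )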
| 45 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
def __init__( self , a , a , a , a , a , a=0.2 , a=0.2 ) -> Dict:
lowercase__ : Any = bp_numa
lowercase__ : Optional[int] = bp_numa
lowercase__ : Tuple = bp_numa
lowercase__ : Optional[Any] = conva_get[:2]
lowercase__ : Optional[int] = conva_get[2]
lowercase__ : Optional[Any] = size_pa
lowercase__ : Union[str, Any] = rate_w
lowercase__ : Union[str, Any] = rate_t
lowercase__ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : Any = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
# save model dict with pickle
lowercase__ : Optional[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(a , 'wb' ) as f:
pickle.dump(a , a )
print(f"""Model saved: {save_path}""" )
@classmethod
def _UpperCAmelCase ( cls , a ) -> Any:
# read saved model
with open(a , 'rb' ) as f:
lowercase__ : Optional[int] = pickle.load(a ) # noqa: S301
lowercase__ : Optional[int] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase__ : List[Any] = model_dic.get('size_pooling1' )
lowercase__ : Tuple = model_dic.get('num_bp1' )
lowercase__ : int = model_dic.get('num_bp2' )
lowercase__ : int = model_dic.get('num_bp3' )
lowercase__ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase__ : Tuple = model_dic.get('rate_thre' )
# create model instance
lowercase__ : Tuple = CNN(a , a , a , a , a , a , a )
# modify model parameter
lowercase__ : str = model_dic.get('w_conv1' )
lowercase__ : Optional[int] = model_dic.get('wkj' )
lowercase__ : Tuple = model_dic.get('vji' )
lowercase__ : str = model_dic.get('thre_conv1' )
lowercase__ : Union[str, Any] = model_dic.get('thre_bp2' )
lowercase__ : List[str] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self , a ) -> str:
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self , a ) -> Any:
return round(a , 3 )
def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[str]:
# convolution process
lowercase__ : int = convs[0]
lowercase__ : Optional[Any] = convs[1]
lowercase__ : int = np.shape(a )[0]
# get the data slice of original image data, data_focus
lowercase__ : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , a ):
for j_focus in range(0 , size_data - size_conv + 1 , a ):
lowercase__ : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(a )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase__ : Union[str, Any] = []
lowercase__ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(a ):
lowercase__ : Any = []
for i_focus in range(len(a ) ):
lowercase__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(a ) )
lowercase__ : Optional[Any] = np.asmatrix(a ).reshape(
a , a )
data_featuremap.append(a )
        # expanding the data slice to one dimension
lowercase__ : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(a ) )
lowercase__ : int = np.asarray(a )
return focus_list, data_featuremap
def _UpperCAmelCase ( self , a , a , a="average_pool" ) -> str:
# pooling process
lowercase__ : List[str] = len(featuremaps[0] )
lowercase__ : List[str] = int(size_map / size_pooling )
lowercase__ : str = []
for i_map in range(len(a ) ):
lowercase__ : List[str] = featuremaps[i_map]
lowercase__ : Optional[int] = []
for i_focus in range(0 , a , a ):
for j_focus in range(0 , a , a ):
lowercase__ : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(a ) )
lowercase__ : List[Any] = np.asmatrix(a ).reshape(a , a )
featuremap_pooled.append(a )
return featuremap_pooled
def _UpperCAmelCase ( self , a ) -> List[str]:
# expanding three dimension data to one dimension list
lowercase__ : Any = []
for i in range(len(a ) ):
lowercase__ : Optional[int] = np.shape(data[i] )
lowercase__ : int = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__ : str = data_listed.getA().tolist()[0]
data_expanded.extend(a )
lowercase__ : int = np.asarray(a )
return data_expanded
def _UpperCAmelCase ( self , a ) -> Dict:
# expanding matrix to one dimension list
lowercase__ : Dict = np.asarray(a )
lowercase__ : Union[str, Any] = np.shape(a )
lowercase__ : Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[Any]:
lowercase__ : Dict = []
lowercase__ : int = 0
for i_map in range(a ):
lowercase__ : str = np.ones((size_map, size_map) )
for i in range(0 , a , a ):
for j in range(0 , a , a ):
lowercase__ : Optional[Any] = pd_pool[
i_pool
]
lowercase__ : Union[str, Any] = i_pool + 1
lowercase__ : List[Any] = np.multiply(
a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(a )
return pd_all
def _UpperCAmelCase ( self , a , a , a , a , a , a=bool ) -> str:
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(a )) )
print((' - - Shape: Teach_Data ', np.shape(a )) )
lowercase__ : int = 0
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
lowercase__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(a ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ : Optional[int] = np.asmatrix(datas_train[p] )
lowercase__ : int = np.asarray(datas_teach[p] )
lowercase__ , lowercase__ : Union[str, Any] = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[Any] = self.pooling(a , self.size_poolinga )
lowercase__ : Tuple = np.shape(a )
lowercase__ : List[str] = self._expand(a )
lowercase__ : Optional[int] = data_bp_input
lowercase__ : Optional[Any] = np.dot(a , self.vji.T ) - self.thre_bpa
lowercase__ : str = self.sig(a )
lowercase__ : Tuple = np.dot(a , self.wkj.T ) - self.thre_bpa
lowercase__ : Any = self.sig(a )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__ : int = np.multiply(
(data_teach - bp_outa) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Any = np.multiply(
np.dot(a , self.wkj ) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Optional[int] = np.dot(a , self.vji )
lowercase__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ : Any = pd_conva_pooled.T.getA().tolist()
lowercase__ : List[str] = self._calculate_gradient_from_pool(
a , a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ : Tuple = self.rate_weight * np.dot(a , a )
lowercase__ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__ : Dict = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ : str = rp + 1
lowercase__ : List[str] = error_count / patterns
all_mse.append(a )
def draw_error():
lowercase__ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(a , '+-' )
plt.plot(a , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(a , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self , a ) -> List[Any]:
# model predict
lowercase__ : Optional[int] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(a )) )
for p in range(len(a ) ):
lowercase__ : List[str] = np.asmatrix(datas_test[p] )
lowercase__ , lowercase__ : Tuple = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Any = self.pooling(a , self.size_poolinga )
lowercase__ : Union[str, Any] = self._expand(a )
lowercase__ : Optional[Any] = data_bp_input
lowercase__ : str = bp_outa * self.vji.T - self.thre_bpa
lowercase__ : Optional[Any] = self.sig(a )
lowercase__ : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ : List[str] = self.sig(a )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ : Optional[int] = [list(map(self.do_round , a ) ) for each in produce_out]
return np.asarray(a )
def _UpperCAmelCase ( self , a ) -> List[str]:
# return the data of image after convoluting process so we can check it out
lowercase__ : Any = np.asmatrix(a )
lowercase__ , lowercase__ : str = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Tuple = self.pooling(a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
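
# Added standalone sketch (hedged): the heart of `convolute` above is a strided
# sliding-window dot product against a kernel, followed by the same sigmoid the
# class uses; this miniature reproduces that arithmetic for a single kernel.
# It reuses the `np` import at the top of this file.
def tiny_convolute(data, kernel, step=1):
    size_conv = kernel.shape[0]
    rows = range(0, data.shape[0] - size_conv + 1, step)
    cols = range(0, data.shape[1] - size_conv + 1, step)
    out = [
        [
            1 / (1 + np.exp(-np.sum(data[i : i + size_conv, j : j + size_conv] * kernel)))
            for j in cols
        ]
        for i in rows
    ]
    return np.asmatrix(out)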
| 77 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
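
# Added self-contained illustration of the codepoint test above, covering the
# two most common CJK Unified Ideographs ranges only (the full helper also
# checks the extension and compatibility blocks):
def _cjk_block_demo() -> None:
    def in_main_cjk_blocks(cp: int) -> bool:
        return 0x4E_00 <= cp <= 0x9F_FF or 0x34_00 <= cp <= 0x4D_BF
    assert in_main_cjk_blocks(ord('中'))
    assert not in_main_cjk_blocks(ord('a'))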
def A ( _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
# word like '180' or '身高' or '神'
for char in word:
_UpperCAmelCase = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = set()
for token in tokens:
_UpperCAmelCase = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
_UpperCAmelCase = list(_UpperCAmelCase )
return word_list
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : set() ) -> int:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_UpperCAmelCase = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
_UpperCAmelCase = bert_tokens
_UpperCAmelCase , _UpperCAmelCase = 0, len(_UpperCAmelCase )
while start < end:
_UpperCAmelCase = True
if is_chinese(bert_word[start] ):
_UpperCAmelCase = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
_UpperCAmelCase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_UpperCAmelCase = '##' + bert_word[j]
_UpperCAmelCase = start + i
_UpperCAmelCase = False
break
if single_word:
start += 1
return bert_word
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : LTP , _UpperCAmelCase : BertTokenizer ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(0 , len(_UpperCAmelCase ) , 100 ):
_UpperCAmelCase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
_UpperCAmelCase = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = []
for i in range(0 , len(_UpperCAmelCase ) , 100 ):
_UpperCAmelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = []
for id in input_ids:
_UpperCAmelCase = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
_UpperCAmelCase = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
_UpperCAmelCase = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def A ( _UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
    # For Chinese (Ro)BERT models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_UpperCAmelCase = LTP(args.ltp ) # faster in GPU device
_UpperCAmelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCAmelCase = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
UpperCAmelCase__ = parser.parse_args()
main(args)
| 290 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __lowerCAmelCase :
def __init__( self : str , A : str , A : Dict=13 , A : int=7 , A : Tuple=True , A : Union[str, Any]=True , A : Any=True , A : Dict=True , A : Dict=99 , A : Tuple=32 , A : Any=2 , A : Any=4 , A : Any=37 , A : Optional[Any]="gelu" , A : List[Any]=0.1 , A : Tuple=0.1 , A : Optional[Any]=5_12 , A : Tuple=16 , A : int=2 , A : List[str]=0.0_2 , A : int=False , A : List[Any]=True , A : Optional[Any]="None" , A : Union[str, Any]=3 , A : List[str]=4 , A : List[Any]=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple , A : int , A : Any , A : List[str] , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel(config=A)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str , A : Tuple , A : Tuple , A : Optional[int] , A : List[str] , A : Any , A : List[str] , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForMaskedLM(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : List[Any] , A : Tuple , A : Tuple , A : Optional[int] , A : Optional[int] , A : List[Any] , A : Any , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForSequenceClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : List[Any] , A : List[str] , A : Optional[Any] , A : int , A : Any , A : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForTokenClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Dict , A : Dict , A : Any , A : Tuple , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForQuestionAnswering(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A)
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
self.assertIsNotNone(A)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet')
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
_UpperCAmelCase = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , A , atol=1E-4)
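
# Added usage sketch (hedged): the integration test above as a plain function,
# reusing this file's imports; the checkpoint is the one the test loads and the
# token ids below are an arbitrary shortened example.
def example_deberta_forward():
    model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
    input_ids = tf.constant([[0, 31_414, 232, 328, 740, 2]])
    attention_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
    return model(input_ids, attention_mask=attention_mask)[0]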
| 290 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] ="markuplm"
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0 , snake_case__=2 , snake_case__=256 , snake_case__=1_024 , snake_case__=216 , snake_case__=1_001 , snake_case__=32 , snake_case__=50 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Tuple = layer_norm_eps
lowerCAmelCase : str = position_embedding_type
lowerCAmelCase : List[str] = use_cache
lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
lowerCAmelCase : Tuple = max_depth
lowerCAmelCase : Optional[Any] = max_xpath_tag_unit_embeddings
lowerCAmelCase : List[str] = max_xpath_subs_unit_embeddings
lowerCAmelCase : List[Any] = tag_pad_id
lowerCAmelCase : Optional[Any] = subs_pad_id
lowerCAmelCase : List[str] = xpath_unit_hidden_size
| 108 |
"""simple docstring"""
class RadixNode:
    """A node of a radix tree (compressed trie) over strings."""

    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater than or equal to the matching prefix
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return whether the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining, so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return whether it was found."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # We have word remaining, so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as a non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0 ) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height , self.prefix , "  (leaf)" if self.is_leaf else "" )
        for value in self.nodes.values():
            value.print_tree(height + 1 )


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Example workflow: build a small tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print("Words:" , words )
    print("Tree:" )
    root.print_tree()


if __name__ == "__main__":
    main()
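
# Added illustration of RadixNode.match, the primitive the insert/find/delete
# cases above branch on: it returns (common prefix, prefix remainder, word remainder).
def _match_example() -> None:
    assert RadixNode(prefix="banana").match("bandana") == ("ban", "ana", "dana")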
| 108 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[int] , A : Union[str, Any] , A : Any=7 , A : Optional[int]=3 , A : Tuple=3_0 , A : List[Any]=4_0_0 , A : str=True , A : Optional[int]=None , A : Tuple=True , A : Union[str, Any]=1 / 2_5_5 , A : Any=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Optional[int]=[0.5, 0.5, 0.5] , A : str=True , ) ->List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : List[str] = size
lowerCamelCase__ : Union[str, Any] = do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : str = image_std
lowerCamelCase__ : List[Any] = do_pad
def __lowerCamelCase ( self : List[str] ) ->Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self : Tuple , A : int , A : List[str]=False ) ->int:
if not batched:
lowerCamelCase__ : Union[str, Any] = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : List[Any] = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Any = self.size['''shortest_edge''']
else:
lowerCamelCase__ : Optional[Any] = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : Dict = max(A , key=lambda A : item[0] )[0]
lowerCamelCase__ : Dict = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
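
# Added standalone sketch of the shortest-edge rule the helper above mirrors:
# the shorter side is scaled to `shortest_edge` and the longer side keeps the
# aspect ratio (the function name here is illustrative, not part of the suite).
def shortest_edge_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge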
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
lowerCamelCase__ : Optional[int] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self : Dict ) ->Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
self.assertTrue(hasattr(A , '''rescale_factor''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
lowerCamelCase__ : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , A )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
pass
def __lowerCamelCase ( self : str ) ->Optional[Any]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Any ) ->Any:
# Initialize image_processing
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self : Tuple ) ->List[Any]:
# prepare image and target
lowerCamelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Union[str, Any] = json.loads(f.read() )
lowerCamelCase__ : List[str] = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase__ : Optional[int] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
lowerCamelCase__ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase__ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def __lowerCamelCase ( self : Optional[Any] ) ->List[str]:
# prepare image, target and masks_path
lowerCamelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Optional[Any] = json.loads(f.read() )
lowerCamelCase__ : Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase__ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase__ : Any = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
lowerCamelCase__ : Tuple = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : int = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 265 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _a ( *UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=True , UpperCAmelCase=2 ) -> str:
"""simple docstring"""
from .. import __version__
lowerCamelCase__ : Optional[Any] = take_from
lowerCamelCase__ : Any = ()
if not isinstance(args[0] , UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(UpperCAmelCase ).base_version ) >= version.parse(UpperCAmelCase ):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}" )
lowerCamelCase__ : List[Any] = None
if isinstance(UpperCAmelCase , UpperCAmelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(UpperCAmelCase ),)
lowerCamelCase__ : Tuple = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(UpperCAmelCase , UpperCAmelCase ):
values += (getattr(UpperCAmelCase , UpperCAmelCase ),)
lowerCamelCase__ : Union[str, Any] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
lowerCamelCase__ : int = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
lowerCamelCase__ : int = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , UpperCAmelCase , stacklevel=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) > 0:
lowerCamelCase__ : Tuple = inspect.getouterframes(inspect.currentframe() )[1]
lowerCamelCase__ : Optional[Any] = call_frame.filename
lowerCamelCase__ : List[Any] = call_frame.lineno
lowerCamelCase__ : Optional[Any] = call_frame.function
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(UpperCAmelCase ) == 0:
return
elif len(UpperCAmelCase ) == 1:
return values[0]
return values
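

# Added illustrative check (not part of the original utility): the deprecation
# helper above gates removal on `packaging` version comparisons, which behave
# like this (the version strings here are made up for the example):
if __name__ == "__main__":
    from packaging import version

    current = version.parse(version.parse("0.21.0.dev0").base_version)  # -> 0.21.0
    removal = version.parse("0.20.0")
    # once the library version reaches the scheduled removal version,
    # the deprecation tuple itself must be deleted:
    print(current >= removal)  # True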
| 265 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
SCREAMING_SNAKE_CASE :Optional[Any] = 2_9979_2458
# Symbols
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Tuple = symbols('ct x y z')
def UpperCAmelCase ( a_ ) -> float:
"""simple docstring"""
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def UpperCAmelCase ( a_ ) -> float:
"""simple docstring"""
return 1 / sqrt(1 - beta(a_ ) ** 2 )
def UpperCAmelCase ( a_ ) -> np.ndarray:
"""simple docstring"""
return np.array(
[
[gamma(a_ ), -gamma(a_ ) * beta(a_ ), 0, 0],
[-gamma(a_ ) * beta(a_ ), gamma(a_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def UpperCAmelCase ( a_ , a_ = None ) -> np.ndarray:
"""simple docstring"""
if event is None:
__A = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(a_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE :Any = transform(2997_9245)
print('Example of four vector: ')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE :Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE :Optional[Any] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 15 |
from math import isqrt, loga
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case , snake_case ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case ) if is_prime[i]]
def _UpperCAmelCase ( snake_case = 80_08_00 , degree = 80_08_00 ):
"""simple docstring"""
_lowerCAmelCase = degree * loga(snake_case )
_lowerCAmelCase = int(snake_case )
_lowerCAmelCase = calculate_prime_numbers(snake_case )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 0 |
from __future__ import annotations
from cmath import sqrt
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
lowerCAmelCase__ : List[str] = b * b - 4 * a * c
lowerCAmelCase__ : Optional[Any] = (-b + sqrt(A_ )) / (2 * a)
lowerCAmelCase__ : str = (-b - sqrt(A_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = quadratic_roots(a=5 , b=6 , c=1 )
print(f'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
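    # Expected result (added for reference): for a=5, b=6, c=1 the discriminant
    # is 36 - 20 = 16, so the roots are -0.2 and -1.0.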
| 369 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__UpperCamelCase : List[Any] = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BlenderbotSmallTokenizer
def __init__( self : Dict ,lowercase_ : Dict=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : Any="<|endoftext|>" ,lowercase_ : Optional[Any]="<|endoftext|>" ,lowercase_ : Dict="<|endoftext|>" ,lowercase_ : Optional[int]=False ,lowercase_ : Union[str, Any]=True ,**lowercase_ : Union[str, Any] ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ ,merges=lowercase_ ,add_prefix_space=lowercase_ ,trim_offsets=lowercase_ ,) ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ : Tuple = add_prefix_space
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : int=None ):
lowerCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
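# Layout note (added): `build_inputs_with_special_tokens` above wraps a single
# sequence as [bos] A [eos], and a pair as [bos] A [eos] [eos] B [eos] -- note
# the doubled end-of-sequence token acting as the separator.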
| 74 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : int , __snake_case : int ):
return 1 if input_a == input_a else 0
def lowercase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 33 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
pass
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] , __A : Any ):
__UpperCamelCase = data
__UpperCamelCase = None
def __iter__( self : Optional[Any] ):
__UpperCamelCase = self
__UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
__UpperCamelCase = node.next_node
@property
def _lowerCamelCase ( self : List[str] ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
a__ : Dict =Node(1)
a__ : Optional[int] =Node(2)
a__ : List[str] =Node(3)
a__ : Optional[int] =Node(4)
print(root_node.has_loop) # False
a__ : str =root_node.next_node
print(root_node.has_loop) # True
a__ : Optional[int] =Node(5)
a__ : List[Any] =Node(6)
a__ : int =Node(5)
a__ : Tuple =Node(6)
print(root_node.has_loop) # False
a__ : str =Node(1)
print(root_node.has_loop) # False
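    # Note (added): `has_loop` materialises every visited node in a Python
    # list and checks membership on each step, so detection costs O(n) extra
    # memory and O(n^2) time in the worst case; the cycle itself is signalled
    # by the `ContainsLoopError` raised from `__iter__`.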
| 53 | 0 |
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
while a != 0:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = b % a, a
return b
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
if gcd(__UpperCamelCase ,__UpperCamelCase ) != 1:
SCREAMING_SNAKE_CASE : str = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(__UpperCamelCase )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = 1, 0, a
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = 0, 1, m
    while va != 0:
        SCREAMING_SNAKE_CASE : Union[str, Any] = ua // va
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
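# Worked example (added for illustration): the inverse of 7 modulo 26 is 15,
# since 7 * 15 = 105 = 4 * 26 + 1. With the original (pre-obfuscation) names
# this would read `find_mod_inverse(7, 26) == 15`; `find_mod_inverse` is an
# assumed name for the second function above.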
| 366 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE , R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.framework == "tf":
SCREAMING_SNAKE_CASE : List[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=A )
else:
raise ValueError('Unsupported framework' )
return masked_index
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_masked_index(A )
SCREAMING_SNAKE_CASE : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask', self.model.base_model_prefix, F"No mask_token ({self.tokenizer.mask_token}) found on the input", )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if isinstance(A, A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A )
def UpperCamelCase_ ( self, A, A=None, **A ):
'''simple docstring'''
if return_tensors is None:
SCREAMING_SNAKE_CASE : Dict = self.framework
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(A, return_tensors=A )
self.ensure_exactly_one_mask_token(A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model(**A )
SCREAMING_SNAKE_CASE : List[str] = model_inputs['input_ids']
return model_outputs
def UpperCamelCase_ ( self, A, A=5, A=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
SCREAMING_SNAKE_CASE : List[str] = target_ids.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = model_outputs['input_ids'][0]
SCREAMING_SNAKE_CASE : Union[str, Any] = model_outputs['logits']
if self.framework == "tf":
SCREAMING_SNAKE_CASE : Dict = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
SCREAMING_SNAKE_CASE : Tuple = outputs.numpy()
SCREAMING_SNAKE_CASE : Any = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE : List[Any] = stable_softmax(A, axis=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.gather_nd(tf.squeeze(A, 0 ), target_ids.reshape(-1, 1 ) )
SCREAMING_SNAKE_CASE : Optional[int] = tf.expand_dims(A, 0 )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.math.top_k(A, k=A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = topk.values.numpy(), topk.indices.numpy()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
SCREAMING_SNAKE_CASE : Optional[int] = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE : Any = logits.softmax(dim=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE : int = probs[..., target_ids]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = probs.topk(A )
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist() ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for v, p in zip(_values, _predictions ):
# Copy is important since we're going to modify this array in place
SCREAMING_SNAKE_CASE : Tuple = input_ids.numpy().copy()
if target_ids is not None:
SCREAMING_SNAKE_CASE : Any = target_ids[p].tolist()
SCREAMING_SNAKE_CASE : List[Any] = p
# Filter padding out:
SCREAMING_SNAKE_CASE : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(A, skip_special_tokens=A )
SCREAMING_SNAKE_CASE : List[Any] = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(A )
result.append(A )
if single_mask:
return result[0]
return result
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : List[Any] = [targets]
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[str] = []
for target in targets:
SCREAMING_SNAKE_CASE : Dict = vocab.get(A, A )
if id_ is None:
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
A, add_special_tokens=A, return_attention_mask=A, return_token_type_ids=A, max_length=1, truncation=A, )['input_ids']
if len(A ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'We cannot replace it with anything meaningful, ignoring it' )
continue
SCREAMING_SNAKE_CASE : List[Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
SCREAMING_SNAKE_CASE : List[str] = list(set(A ) )
if len(A ) == 0:
raise ValueError('At least one target must be provided when passed.' )
SCREAMING_SNAKE_CASE : Any = np.array(A )
return target_ids
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if targets is not None:
SCREAMING_SNAKE_CASE : Any = self.get_target_ids(A, A )
SCREAMING_SNAKE_CASE : str = target_ids
if top_k is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self, A, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = super().__call__(A, **A )
if isinstance(A, A ) and len(A ) == 1:
return outputs[0]
return outputs
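# Illustrative usage of the task this class implements (added; the model name
# is only an example):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)
#
# Each prediction is a dict with the keys built in `postprocess` above:
# "score", "token", "token_str" and "sequence".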
| 246 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __snake_case ( __lowerCAmelCase ):
a__ = 42
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
a__: Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
a__: List[Any] = latents.to(lowercase)
a__: Any = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self , lowercase=0) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
a__: Union[str, Any] = torch.device(f'cuda:{gpu_id}')
a__: List[str] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase)
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
if self.device != torch.device('meta') or not hasattr(self.image_encoder , '_hf_hook'):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ) -> str:
'''simple docstring'''
if isinstance(lowercase , lowercase) and isinstance(image[0] , torch.Tensor):
a__: Any = torch.cat(lowercase , axis=0) if image[0].ndim == 4 else torch.stack(lowercase , axis=0)
if not isinstance(lowercase , torch.Tensor):
a__: List[Any] = self.image_processor(lowercase , return_tensors='pt').pixel_values[0].unsqueeze(0)
a__: str = image.to(dtype=self.image_encoder.dtype , device=lowercase)
a__: str = self.image_encoder(lowercase)['last_hidden_state']
a__: List[str] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a__: List[str] = image_embeds.repeat_interleave(lowercase , dim=0)
if do_classifier_free_guidance:
a__: Optional[Any] = torch.zeros_like(lowercase)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__: Optional[int] = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase)
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowercase , PIL.Image.Image):
a__: Any = 1
elif isinstance(lowercase , torch.Tensor):
a__: Tuple = image.shape[0]
elif isinstance(lowercase , lowercase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
a__: Any = len(lowercase)
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase)}')
a__: Any = self._execution_device
a__: Optional[Any] = batch_size * num_images_per_prompt
a__: Optional[Any] = guidance_scale > 1.0
a__: Dict = self._encode_image(lowercase , lowercase , lowercase , lowercase)
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase)
a__: Dict = self.scheduler.timesteps
a__: List[Any] = self.prior.config.num_embeddings
a__: List[str] = self.prior.config.embedding_dim
a__: Tuple = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
a__: List[Any] = latents.reshape(latents.shape[0] , lowercase , lowercase)
for i, t in enumerate(self.progress_bar(lowercase)):
# expand the latents if we are doing classifier free guidance
a__: Dict = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a__: Tuple = self.scheduler.scale_model_input(lowercase , lowercase)
a__: Any = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
a__ , a__: List[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
a__ , a__: str = noise_pred.chunk(2)
a__: Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a__: str = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase)
a__: Any = []
for i, latent in enumerate(lowercase):
a__: int = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(lowercase)
a__: int = torch.stack(lowercase)
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
a__: List[str] = images.cpu().numpy()
if output_type == "pil":
a__: Optional[int] = [self.numpy_to_pil(lowercase) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook') and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase)
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 290 | 1 |
"""simple docstring"""
from math import factorial
A : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCamelCase ) )
def _lowerCamelCase ( _UpperCamelCase = 60 , _UpperCamelCase = 100_0000 ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
__lowerCAmelCase = 0
# the cached sizes of the previous chains
__lowerCAmelCase = {}
for start_chain_element in range(1 , _UpperCamelCase ):
# The temporary set will contain the elements of the chain
__lowerCAmelCase = set()
__lowerCAmelCase = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
__lowerCAmelCase = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_UpperCamelCase )
chain_set_length += 1
__lowerCAmelCase = digit_factorial_sum(_UpperCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
__lowerCAmelCase = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
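    # Worked example (added): starting from 169 the chain is
    # 169 -> 363601 -> 1454 -> 169, i.e. exactly three distinct terms before
    # it repeats (1! + 6! + 9! = 363601, whose digit factorials sum to 1454,
    # and 1! + 4! + 5! + 4! = 169 again).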
| 259 |
"""simple docstring"""
import string
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__lowerCAmelCase = ""
for symbol in message:
if symbol in string.ascii_uppercase:
__lowerCAmelCase = string.ascii_uppercase.find(_UpperCamelCase )
__lowerCAmelCase = num - key
if num < 0:
__lowerCAmelCase = num + len(string.ascii_uppercase )
__lowerCAmelCase = translated + string.ascii_uppercase[num]
else:
__lowerCAmelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = input("Encrypted message: " )
__lowerCAmelCase = message.upper()
decrypt(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
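    # Worked example (added): entering the ciphertext "KHOOR" prints one
    # candidate per key; the line for key #3 reads "HELLO", since each letter
    # is shifted back three places.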
| 259 | 1 |
'''simple docstring'''
import argparse
import copy
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : int = {}
with open(_lowercase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : str = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Union[str, Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Tuple = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : Any = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
with open(_lowercase ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : Dict = start_node
UpperCAmelCase : int = []
UpperCAmelCase : Dict = start_node
UpperCAmelCase : str = 0
while visiting not in first_solution:
UpperCAmelCase : str = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_lowercase ) and k[0] not in first_solution:
UpperCAmelCase : List[str] = k[1]
UpperCAmelCase : int = k[0]
first_solution.append(_lowercase )
UpperCAmelCase : List[Any] = distance_of_first_solution + int(_lowercase )
UpperCAmelCase : str = best_node
first_solution.append(_lowercase )
UpperCAmelCase : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : Dict = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = []
for n in solution[1:-1]:
UpperCAmelCase : int = solution.index(_lowercase )
for kn in solution[1:-1]:
UpperCAmelCase : Optional[int] = solution.index(_lowercase )
if n == kn:
continue
UpperCAmelCase : Union[str, Any] = copy.deepcopy(_lowercase )
UpperCAmelCase : Any = kn
UpperCAmelCase : str = n
UpperCAmelCase : Any = 0
for k in _tmp[:-1]:
UpperCAmelCase : int = _tmp[_tmp.index(_lowercase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : Union[str, Any] = distance + int(i[1] )
_tmp.append(_lowercase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : Optional[int] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda _lowercase : _lowercase[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : str = 1
UpperCAmelCase : Optional[Any] = first_solution
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = distance_of_first_solution
UpperCAmelCase : Tuple = solution
while count <= iters:
UpperCAmelCase : Tuple = find_neighborhood(_lowercase , _lowercase )
UpperCAmelCase : List[str] = 0
UpperCAmelCase : str = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(_lowercase ) - 1
UpperCAmelCase : Tuple = False
while not found:
UpperCAmelCase : Dict = 0
while i < len(_lowercase ):
if best_solution[i] != solution[i]:
UpperCAmelCase : Any = best_solution[i]
UpperCAmelCase : Union[str, Any] = solution[i]
break
UpperCAmelCase : str = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Tuple = cost
UpperCAmelCase : Optional[Any] = solution
else:
UpperCAmelCase : List[str] = index_of_best_solution + 1
UpperCAmelCase : Optional[int] = neighborhood[index_of_best_solution]
if len(_lowercase ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : Union[str, Any] = count + 1
return best_solution_ever, best_cost
def __lowerCamelCase ( _lowercase=None ) -> Optional[int]:
UpperCAmelCase : str = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Optional[int] = generate_first_solution(
args.File , _lowercase )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = tabu_search(
_lowercase , _lowercase , _lowercase , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
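# Input format note (added): `generate_neighbours` parses the graph file as a
# whitespace-separated edge list, one edge per line, e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# where the node labels are illustrative and the third column is the distance
# used by the nearest-neighbour construction and the tabu search.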
| 265 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionDiffEditPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase = frozenset([] )
def _lowercase( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , )
UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
UpperCAmelCase : List[Any] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , )
torch.manual_seed(0 )
UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[Any]:
UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : List[Any] = torch.manual_seed(A )
else:
UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : int = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Any = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> str:
UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : str = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : List[str] = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
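        # Flow note (added): the integration tests above exercise DiffEdit's
        # three stages in order -- `generate_mask` to locate the edit region,
        # `invert` to recover latents from the source image, then the main
        # pipeline call that inpaints the masked region with the target prompt.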
| 265 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = BertTokenizer
A_ = BertTokenizerFast
A_ = True
A_ = True
A_ = filter_non_english
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
__a : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Union[str, Any] = 'UNwant\u00E9d,running'
__a : List[str] = 'unwanted, running'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.tokenizer_class(self.vocab_file )
__a : int = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : Union[str, Any] = self.get_tokenizer()
__a : Dict = self.get_rust_tokenizer()
__a : int = 'UNwant\u00E9d,running'
__a : Tuple = tokenizer.tokenize(__a )
__a : List[Any] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__a : Tuple = tokenizer.encode(__a , add_special_tokens=__a )
__a : str = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__a : Dict = self.get_rust_tokenizer()
__a : int = tokenizer.encode(__a )
__a : Tuple = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# With lower casing
__a : Optional[int] = self.get_tokenizer(do_lower_case=__a )
__a : Any = self.get_rust_tokenizer(do_lower_case=__a )
__a : List[str] = 'UNwant\u00E9d,running'
__a : int = tokenizer.tokenize(__a )
__a : List[Any] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__a : Any = tokenizer.encode(__a , add_special_tokens=__a )
__a : str = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__a : Dict = self.get_rust_tokenizer()
__a : Optional[Any] = tokenizer.encode(__a )
__a : Optional[int] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = BasicTokenizer()
__a : Union[str, Any] = 'a\n\'ll !!to?\'d of, can\'t.'
__a : Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__a : str = {}
for i, token in enumerate(__a ):
__a : Union[str, Any] = i
__a : Dict = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
    def test_sequence_builders(self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self ):
        '''simple docstring'''
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 294 |
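# A minimal standalone sketch of the two tokenizers exercised by the tests
# above; the import path assumes the standard `transformers` layout, and the
# tiny vocabulary is hypothetical, chosen only to illustrate WordPiece's
# greedy longest-match-first rule.
from transformers.models.bert.tokenization_bert import BasicTokenizer, WordpieceTokenizer

basic = BasicTokenizer(do_lower_case=True)
print(basic.tokenize(" \tHäLLo!how  \n Are yoU?  "))  # ['hallo', '!', 'how', 'are', 'you', '?']

vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wordpiece.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']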
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start , visited , sort ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 294 | 1 |
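# A quick check of the topological_sort above on a second, hypothetical graph;
# because a node is appended only after all of its reachable children, the
# returned list places every node after its descendants.
edges = {'u': ['v', 'w'], 'v': ['x'], 'w': ['x'], 'x': []}
vertices = ['u', 'v', 'w', 'x']
print(topological_sort('u', [], []))  # ['x', 'w', 'v', 'u']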
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config( mixed_precision : str = "no" , save_location : str = default_json_config_file , use_xpu : bool = False ):
    """simple docstring"""
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
    mixed_precision = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}" )
    config = {
        '''compute_environment''': '''LOCAL_MACHINE''',
        '''mixed_precision''': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['''num_processes'''] = num_gpus
        config['''use_cpu'''] = False
        if num_gpus > 1:
            config['''distributed_type'''] = '''MULTI_GPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['''num_processes'''] = num_xpus
        config['''use_cpu'''] = False
        if num_xpus > 1:
            config['''distributed_type'''] = '''MULTI_XPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['''num_processes'''] = num_npus
        config['''use_cpu'''] = False
        if num_npus > 1:
            config['''distributed_type'''] = '''MULTI_NPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    else:
        num_npus = 0
        config['''use_cpu'''] = True
        config['''num_processes'''] = 1
        config['''distributed_type'''] = '''NO'''
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser( parser , parents ):
    """simple docstring"""
    parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
        '''--config_file''' , default=default_json_config_file , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=default_config_command )
return parser
def default_config_command( args ):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
| 146 |
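# A hedged usage sketch for write_basic_config above; the save location is a
# placeholder, and a second call with the same path is a no-op that prints a
# notice and returns False because the file already exists.
config_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate/default_config.json")
if config_path:
    print(f"accelerate configuration saved at {config_path}")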
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story( story_id: str ) -> dict:
    url = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()
def hackernews_top_stories( max_stories: int = 10 ) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories: int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 74 | 0 |
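# Example use of the Hacker News helpers above; this hits the live Firebase
# API, so the titles will vary from run to run.
top_two = hackernews_top_stories(2)
print([story["title"] for story in top_two])
print(hackernews_top_stories_as_markdown(2))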
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"""facebook/bart-base""": BartForConditionalGeneration}
tokenizer_dict = {"""facebook/bart-base""": BartTokenizer}
def parse_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
    parser.add_argument(
        """--validation_file""" , type=str , default=None , help="""A csv or a json file containing the validation data.""" )
    parser.add_argument(
        """--max_length""" , type=int , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
    parser.add_argument(
        """--num_beams""" , type=int , default=None , help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ) , )
    parser.add_argument(
        """--model_name_or_path""" , type=str , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=True , )
    parser.add_argument(
        """--config_name""" , type=str , default=None , help="""Pretrained config name or path if not the same as model_name""" , )
    parser.add_argument(
        """--device""" , type=str , default="""cpu""" , help="""Device where the model will be run""" , )
    parser.add_argument("""--output_file_path""" , type=str , default=None , help="""Where to store the final ONNX file.""" )
    args = parser.parse_args()
    return args
def load_model_tokenizer( model_name , device="cpu" ):
    '''simple docstring'''
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model( model , tokenizer , onnx_file_path , num_beams , max_length ):
    '''simple docstring'''
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = """My friends are cool but they eat too many carbs."""
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors="""pt""" ).to(model.device )
        summary_ids = model.generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            } , example_outputs=summary_ids , )
        logger.info("""Model exported to {}""".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams ),
                """max_length""": np.array(max_length ),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
        logger.info("""Success.""" )
def main():
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = """BART.onnx"""
    logger.info("""Exporting model to ONNX""" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 193 |
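# A hypothetical command line for the exporter above; the script name and model
# id are placeholders, but the flags are exactly the ones defined in parse_args:
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --device cpu --output_file_path bart.onnx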
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_megatron_bert"""] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 193 | 1 |
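# With the lazy module above registered, user-facing imports resolve on first
# attribute access; a minimal sketch (assumes `transformers` is installed with
# torch available):
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig()     # default hyperparameters
model = MegatronBertModel(config)
print(config.model_type)          # 'megatron-bert'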
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream ):
    '''simple docstring'''
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images(f ):
    '''simple docstring'''
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot(labels_dense , num_classes ):
    '''simple docstring'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels(f , one_hot=False , num_classes=10 ):
    '''simple docstring'''
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        return self._images
    @property
    def labels( self ):
        return self._labels
    @property
    def num_examples( self ):
        return self._num_examples
    @property
    def epochs_completed( self ):
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , 'Please write your own downloading logic.' )
def _maybe_download(filename , work_directory , source_url ):
    '''simple docstring'''
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('Successfully downloaded' , filename , size , 'bytes.' )
    return filepath
@deprecated(
    None , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def read_data_sets( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    '''simple docstring'''
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            """Validation size should be between 0 and """
            F'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
| 176 |
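# A small end-to-end sketch of the pipeline above; the data directory is a
# placeholder, and the archives are fetched from DEFAULT_SOURCE_URL on first use.
datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
images, labels = datasets.train.next_batch(32)
print(images.shape, labels.shape)  # (32, 784) (32, 10) after the reshape/one-hot steps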
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str ):
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ : Dict = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier: str ) -> list:
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier )
    return [m.group(0 ) for m in matches]
def _center_text(text: str, width: int ) -> str:
    text_length = 2 if text == """✅""" or text == """❌""" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""", """""" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer""" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast""" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = """|""" + """|""".join([_center_text(c, w ) for c, w in zip(columns, widths )] ) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w ) for l, w in zip(line, widths )] ) + "|\n"
    return table
def check_model_table(overwrite: bool = False ) -> None:
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, """index.md""" ), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, """index.md""" ), """w""", encoding="""utf-8""", newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 246 | 0 |
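# The two small helpers above can be exercised without a transformers checkout;
# a quick hedged illustration:
print(camel_case_split("MegatronBertForCausalLM"))  # ['Megatron', 'Bert', 'For', 'Causal', 'LM']
print("|" + _center_text("✅", 6) + "|")             # the check mark counts as width 2, so it pads to '  ✅  '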
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s ):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact(a_gold , a_pred ):
    """simple docstring"""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions , references ):
    """simple docstring"""
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram(sgrams , cgrams , rgramslist , numref ):
    """simple docstring"""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent , csent , rsents ):
    """simple docstring"""
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    """simple docstring"""
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources , predictions , references ):
    """simple docstring"""
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def compute_sacrebleu(predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    """simple docstring"""
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WikiSplit( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , sources , predictions , references ):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 265 |
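# The metric's intended entry point, taken from the _KWARGS_DESCRIPTION example
# above (requires the datasets, sacrebleu and sacremoses packages):
import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}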
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 265 | 1 |
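# The fuzzy OR/AND used above reduce to element-wise max/min on the membership
# vectors, which is easy to sanity-check by hand with plain numpy:
import numpy as np

mu_a = np.array([0.2, 0.7, 1.0])
mu_b = np.array([0.5, 0.4, 0.9])
print(np.maximum(mu_a, mu_b))  # union:        [0.5 0.7 1. ]
print(np.minimum(mu_a, mu_b))  # intersection: [0.2 0.4 0.9]
print(1 - mu_a)                # complement:   [0.8 0.3 0. ]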
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 2000000 ) -> int:
    return sum(takewhile(lambda x: x < n , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 259 |
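# A quick sanity check of solution() above on a small bound: the primes below
# 10 are 2, 3, 5 and 7, so their sum is 17.
assert solution(10) == 17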
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.''' )
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='''TPU''' )
        print(F'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='''127.0.0.1''' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
                print(F'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args )
def debug_launcher( function , args=() , num_processes=2 ):
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
| 259 | 1 |
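# Hypothetical notebook usage of notebook_launcher above; the training function
# and its argument are placeholders.
def training_loop(learning_rate):
    print(f"training with lr={learning_rate}")

notebook_launcher(training_loop, args=(1e-4,), num_processes=1)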
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
if index == r:
for j in range(_SCREAMING_SNAKE_CASE ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
a__: Any = arr[i]
combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 , _SCREAMING_SNAKE_CASE , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r) -> None:
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
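
    # Illustrative cross-check (standard library only): itertools generates the same
    # 3-element subsets that the recursive routine above prints.
    from itertools import combinations

    expected = list(combinations(arr, 3))
    print(f"itertools.combinations also finds {len(expected)} subsets")  # C(5, 3) = 10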
# This code is contributed by Ambuj sahu
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
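

# --- Illustrative usage sketch: how the reader tokenizer above is typically driven.
# Assumes the public DPRReader/DPRReaderTokenizer checkpoints are reachable.
def _example_dpr_reader_usage():
    from transformers import DPRReader, DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions="What is love?",
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # decode_best_spans sorts passages by relevance and extracts the top answer spans
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)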
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
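
# Illustrative note: with the _LazyModule pattern above, heavy submodules are imported
# only on first attribute access. For example (assumes transformers is installed):
#
#     from transformers.models.mra import MraConfig  # imports configuration_mra only
#     from transformers.models.mra import MraModel   # triggers the torch-backed import
#
# so importing the package stays cheap even when the torch models are never used.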
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
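

# --- Illustrative sketch of what the tests above exercise. The checkpoint name is an
# assumption; `post_process_masks` upscales low-resolution mask logits back to each
# image's original (height, width).
def _example_post_process_masks():
    import numpy as np
    from transformers import SamProcessor

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    low_res_masks = [np.ones((1, 3, 256, 256), dtype=np.float32)]
    masks = processor.post_process_masks(low_res_masks, [[1764, 2646]], [[683, 1024]])
    print(masks[0].shape)  # expected: (1, 3, 1764, 2646)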
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    """
    Returns the number of reduced proper fractions n/d with d <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: adjust the totient of all its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
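
    # Illustrative cross-check (standard library only): for small limits the sieve
    # above matches a direct gcd count of reduced proper fractions (Project Euler 72).
    from math import gcd

    assert sum(1 for d in range(2, 9) for n in range(1, d) if gcd(n, d) == 1) == solution(8) == 21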
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
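

# --- Illustrative dry-run variant (assumes the same PyGithub API and a GITHUB_TOKEN):
# prints the issues the logic above would touch, without editing anything.
def _dry_run():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    for issue in repo.get_issues(state='''open''' ):
        days_since_updated = (dt.utcnow() - issue.updated_at).days
        exempt = any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        if days_since_updated > 23 and not exempt:
            print(f"would flag #{issue.number}: {issue.title}" )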
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    '''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''',
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    '''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. ''',
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
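

# --- Illustrative usage sketch (checkpoint name and patience value are assumptions):
# driving the patience-based early exit from the outside.
def _example_pabee_inference():
    from transformers import BertConfig, BertTokenizer

    config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
    model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", config=config)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    model.bert.set_patience(3)  # exit once 3 consecutive internal classifiers agree
    model.bert.reset_stats()
    inputs = tokenizer("A patience example.", return_tensors="pt")
    logits = model(**inputs)[0]
    model.bert.log_stats()  # reports the average number of executed layers
    print(logits.shape)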
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
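

# --- Illustrative sketch (same public API as exercised above): composing warpers into
# a FlaxLogitsProcessorList, exactly as generate() applies them at each decoding step.
def _example_compose_warpers():
    processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)])
    input_ids = jnp.zeros((1, 4), dtype="i4")
    scores = jnp.arange(10.0)[None, :]  # (batch, vocab)
    warped = processors(input_ids, scores, cur_len=4)
    print(warped)  # all but the 5 largest logits become -inf; the rest are divided by 0.7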
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> " + key.replace("expert/", f"experts/expert_{idx}/"))
            s_dict.pop(key)
return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
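
# Illustrative invocation (paths and the script file name are placeholders):
#
#   python convert_switch_transformers_flax_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-converted \
#       --num_experts 8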
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
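# A minimal sketch of how these distributed-op checks are typically launched; the
# file name `test_ops.py` is an assumed placeholder for this script.
#
#   accelerate launch --num_processes 2 test_ops.py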
| 265 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Any , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Union[str, Any] , *_UpperCamelCase : int , **_UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Any , *_UpperCamelCase : List[str] , **_UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Any , *_UpperCamelCase : Dict , **_UpperCamelCase : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Dict , *_UpperCamelCase : int , **_UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Union[str, Any] , *_UpperCamelCase : Any , **_UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Optional[int] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : List[Any] , *_UpperCamelCase : str , **_UpperCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : List[str] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : List[Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : str ) -> int:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Dict , *_UpperCamelCase : str , **_UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : str , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : int , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Optional[int] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : List[str] , *_UpperCamelCase : str , **_UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Optional[int] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) -> Dict:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : str , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Optional[int] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Tuple , *_UpperCamelCase : Any , **_UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : int , *_UpperCamelCase : List[str] , **_UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : str , *_UpperCamelCase : int , **_UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : str , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Union[str, Any] , *_UpperCamelCase : int , **_UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : int , *_UpperCamelCase : int , **_UpperCamelCase : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : str , *_UpperCamelCase : str , **_UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : int , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : int , *_UpperCamelCase : List[str] , **_UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : int , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : List[str] , *_UpperCamelCase : str , **_UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Dict , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Optional[int] , *_UpperCamelCase : Dict , **_UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : Optional[Any] , *_UpperCamelCase : int , **_UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Optional[Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : int ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self : Dict , *_UpperCamelCase : Dict , **_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def __snake_case( cls : List[str] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def __snake_case( cls : Optional[Any] , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
| 206 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : str = logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
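# Worked example for normalize_box above: a (left, top, right, bottom) box of
# (100, 50, 200, 100) inside a 1000x500 image maps onto the 0-1000 grid as
# [1000*100/1000, 1000*50/500, 1000*200/1000, 1000*100/500] = [100, 100, 200, 200].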
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
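# A minimal usage sketch for the processor above; `apply_ocr=False` keeps the example
# independent of a Tesseract install, and the random "page" is an illustrative
# stand-in for a real document scan.
#
#   import numpy as np
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   page = np.random.randint(0, 256, (1000, 762, 3), dtype=np.uint8)
#   encoding = processor(images=page, return_tensors="pt")
#   print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])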
| 206 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 131072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
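# Quick sanity check of the schedule above, assuming the fixed functions: at t=0,
# sigma=0 and alpha=1, so atan2(0, 1) / pi * 2 == 0; at t=1, sigma=1 and alpha=0,
# so atan2(1, 0) / pi * 2 == 1. The endpoints are preserved while the interior is
# warped by the sin^2 curve.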
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(F'wget {url} ./')
    return F'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UP_NUM_TO_LAYER = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
MID_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
DEPTH_0_TO_LAYER = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
RES_CONV_MAP = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
ATTN_MAP = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming(name):
"""simple docstring"""
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'Attn error with {name}')
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.')

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, F'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), F'Problem with {diffusers_minus_renamed}'

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, F'Diff max: {diff_max} is too much :-/'

    print(F'Conversion for {model_name} successful!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
main(args)
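# A hypothetical invocation of the converter; `gwf-440k` is one of the official names
# in MODELS_MAP, while the script name and output path are illustrative placeholders.
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers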
| 203 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 203 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
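# For the sample tree built by make_tree() (1 at the root, 2 and 3 as its children,
# 4 and 5 under 2), the traversals above give, as a quick sanity check:
#   preorder(make_tree())  -> [1, 2, 4, 5, 3]
#   inorder(make_tree())   -> [4, 2, 5, 1, 3]
#   postorder(make_tree()) -> [4, 5, 2, 3, 1]
#   height(make_tree())    -> 3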
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(F'In-order Traversal: {inorder(root)}')
    print(F'Pre-order Traversal: {preorder(root)}')
    print(F'Post-order Traversal: {postorder(root)}', '''\n''')
    print(F'Height of Tree: {height(root)}', '''\n''')
    print('''Complete Level Order Traversal: ''')
    print(level_order(root), '''\n''')
    print('''Level-wise order Traversal: ''')
    for level in range(1, height(root) + 1):
        print(F'Level {level}:', get_nodes_from_left_to_right(root, level=level))
    print('''\nZigZag order Traversal: ''')
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 68 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f"{solution() = }")
| 68 | 1 |
'''simple docstring'''
def sylvester(number: int) -> int:
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
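# First terms produced by the recurrence above, as a quick check: sylvester(1) = 2,
# sylvester(2) = 3, sylvester(3) = 7, sylvester(4) = 43, sylvester(5) = 1807 -- each
# term is one more than the product of all previous terms.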
if __name__ == "__main__":
print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 53 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce TensorFlow console noise, if TF is installed
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 53 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 358 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
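# Worked example for the helper above, assuming the fixed version: for a 480x640
# input and a (384, 384) target with keep_aspect_ratio=True and multiple=32, the
# scale closest to 1 wins (384/480 = 0.8 beats 384/640 = 0.6), so both sides are
# scaled by 0.8, giving (384, 512) -- both already multiples of 32.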
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    resample=resample,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                )
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
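# A minimal usage sketch for the processor above; the random image is an illustrative
# stand-in for a real photo.
#
#   import numpy as np
#   image_processor = DPTImageProcessor()
#   dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   inputs = image_processor(images=dummy_image, return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])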
| 269 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 75 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
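# Example of the single-point crossover above: with parents "ABCDE" and "abcde" and
# random_slice == 2, the children are "ABcde" and "abCDE".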
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation , population , target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
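# Illustrative smoke test for the evolve loop above -- a hedged sketch, not part of
# the original module. It assumes N_POPULATION, N_SELECTED and MUTATION_PROBABILITY
# are defined earlier in this file:
#
#   random.seed(42)
#   gen, pop, best = basic("hello", list(" abcdefghijklmnopqrstuvwxyz"), debug=False)
#   assert best == "hello"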
| 49 | 0 |
def a__ ( column_title ):
    # Convert an Excel-style column title (e.g. "AB") to its column number.
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
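# Hedged usage sketch for a__ above. The values follow from the base-26 scheme:
# each letter contributes (ord(letter) - 64) * 26**power, counting from the right.
#
#   >>> a__("A")
#   1
#   >>> a__("AB")
#   28
#   >>> a__("ZZ")
#   702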
 | 363 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction ( train_dt , train_usr , train_mtch , test_dt , test_mtch ):
    # Ordinary least squares via the normal equation: beta = (X^T X)^-1 X^T y
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def sarimax_predictor ( train_user , train_match , test_match ):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=6_0_0 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor ( x_train , x_test , train_user ):
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker ( train_user ):
    train_user.sort()
    q1 = np.percentile(train_user , 2_5 )
    q3 = np.percentile(train_user , 7_5 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker ( list_vote , actual_result ):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 305 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_( TrainingArguments ):
    '''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor : bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout : Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler : Optional[str] = field(
        default='linear' , metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
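# Hedged usage sketch (assumes the standard TrainingArguments constructor, where
# `output_dir` is the only required argument):
#
#   training_args = lowerCAmelCase_(
#       output_dir="out",
#       label_smoothing=0.1,
#       sortish_sampler=True,
#       predict_with_generate=True,
#   )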
| 37 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
def _a (self ):
A_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : int = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : int = self.get_config()
return config, pixel_values, labels
def _a (self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase , layer_scale_init_value=1E-5 , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = SwiftFormerModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = self.num_labels
A_ : Any = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A_ : int = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self ):
((A_), (A_), (A_)) : int = self.prepare_config_and_inputs()
A_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def _a (self ):
A_ : Optional[int] = SwiftFormerModelTester(self )
A_ : Any = ConfigTester(
self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(lowercase )
A_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = SwiftFormerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a (self ):
pass
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : str = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Any = outputs.hidden_states
A_ : Any = 8
self.assertEqual(len(lowercase ) , lowercase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A_, A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def _a (self ):
def _config_zero_init(lowercase ):
A_ : Optional[Any] = copy.deepcopy(lowercase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase , lowercase , 1E-10 )
if isinstance(getattr(lowercase , lowercase , lowercase ) , lowercase ):
A_ : Any = _config_zero_init(getattr(lowercase , lowercase ) )
setattr(lowercase , lowercase , lowercase )
return configs_no_init
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Any = _config_zero_init(lowercase )
for model_class in self.all_model_classes:
A_ : List[str] = model_class(config=lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def a ( ):
'''simple docstring'''
A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a (self ):
A_ : Any = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowercase )
A_ : Dict = self.default_image_processor
A_ : Dict = prepare_img()
A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(**lowercase )
# verify the logits
A_ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[str] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
 | 206 | 0 |
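# Hedged inference sketch mirroring the SwiftFormer integration test above (the
# model id and processor come straight from that test; the rest is standard usage):
#
#   import torch
#   from transformers import SwiftFormerForImageClassification, ViTImageProcessor
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   inputs = processor(images=image, return_tensors="pt")   # `image` is any PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits                     # shape (1, 1000)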
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __magic_name__ ( PretrainedConfig ):
"""simple docstring"""
__UpperCamelCase = '''gptj'''
__UpperCamelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig ( OnnxConfigWithPast ):
    """simple docstring"""
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return self._config.n_head
    def SCREAMING_SNAKE_CASE ( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = super(GPTJOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return 13
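# Hedged usage sketch for the config class above (the attribute_map aliases let
# the generic names resolve to the GPT-J-specific ones):
#
#   config = __magic_name__(n_positions=1_024, n_layer=12, n_head=12, n_embd=768)
#   assert config.hidden_size == 768          # alias of n_embd
#   assert config.num_hidden_layers == 12     # alias of n_layer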
| 361 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
_lowerCAmelCase : Tuple = logging.getLogger(__name__)
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
logger.info("Start encoding" )
logger.info(f"{len(_lowerCAmelCase )} examples to process." )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
logger.info("Finished binarization" )
logger.info(f"{len(_lowerCAmelCase )} examples processed." )
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
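# Hedged sketch of consuming the pickle this script writes (the path follows the
# default --dump_file/--tokenizer_name values above):
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#       sequences = pickle.load(f)   # list of np.uint16/np.int32 token-id arrays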
| 70 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class a__ ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
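# Hedged usage sketch: a dataset's features are aligned so `label_schema` picks up
# the dataset's own label names (`ClassLabel(names=...)` here is illustrative):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = a__()
#   task = task.align_with_features(features)
#   task.column_mapping   # {"image": "image", "labels": "labels"}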
| 68 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class a__ :
"""simple docstring"""
__lowerCamelCase = BlenderbotSmallConfig
__lowerCamelCase = {}
__lowerCamelCase = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def UpperCamelCase ( self , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(lowercase , attention_mask=lowercase )[0]
A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__lowerCamelCase = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , return_tensors="tf" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
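# Hedged sketch of the generation path the slow test above exercises
# (`TFAutoModelForSeqaSeqLM` is this dump's spelling of TFAutoModelForSeq2SeqLM):
#
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
#   model = TFAutoModelForSeqaSeqLM.from_pretrained("facebook/blenderbot_small-90M")
#   inputs = tokenizer(["Social anxiety\nWow, I am never shy. Do you have anxiety?"], return_tensors="tf")
#   ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)[0])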
| 68 | 1 |
import operator as op
SCALER_NAME = 'scaler.pt'
MODEL_NAME = 'pytorch_model'
RNG_STATE_NAME = 'random_states'
OPTIMIZER_NAME = 'optimizer'
SCHEDULER_NAME = 'scheduler'
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
SAGEMAKER_PYTORCH_VERSION = '1.10.2'
SAGEMAKER_PYTHON_VERSION = 'py38'
SAGEMAKER_TRANSFORMERS_VERSION = '4.17.0'
SAGEMAKER_PARALLEL_EC2_INSTANCES = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
FSDP_SHARDING_STRATEGY = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
FSDP_AUTO_WRAP_POLICY = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
FSDP_BACKWARD_PREFETCH = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
FSDP_STATE_DICT_TYPE = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
FSDP_PYTORCH_VERSION = '2.0.1'
DEEPSPEED_MULTINODE_LAUNCHERS = ['pdsh', 'standard', 'openmpi', 'mvapich']
TORCH_DYNAMO_MODES = ['default', 'reduce-overhead', 'max-autotune']
STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]
CUDA_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
# NOTE: constant name below reconstructed from context; the original label is an assumption.
TORCH_DISTRIBUTED_OPERATION_TYPES = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
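# Hedged sketch of how the comparison-operator table above is typically used for
# version gating (`compare_versions` here is illustrative, not part of this module):
#
#   from packaging import version
#   def compare_versions(current: str, operation: str, target: str) -> bool:
#       return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(target))
#   compare_versions("2.1.0", ">=", FSDP_PYTORCH_VERSION)   # True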
| 368 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Any = (DDPMParallelScheduler,)
    def __UpperCAmelCase ( self , **kwargs ) -> Any:
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
def __UpperCAmelCase ( self ) -> Any:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.check_over_configs(thresholding=_UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , )
def __UpperCAmelCase ( self ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
    def __UpperCAmelCase ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
        assert abs(result_mean.item() - 0.50_05 ) < 1E-3
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
        assert abs(result_mean.item() - 0.33_72 ) < 1E-3
    def __UpperCAmelCase ( self ) -> int:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
        assert abs(result_mean.item() - 0.26_31 ) < 1E-3
    def __UpperCAmelCase ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def __UpperCAmelCase ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def __UpperCAmelCase ( self ) -> Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def __UpperCAmelCase ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
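# Hedged sketch of the denoising loop the full-loop tests above step through
# (`model` stands for any epsilon-predicting network; shapes are illustrative):
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in reversed(range(1_000)):
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample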
| 145 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use VideoMAEImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
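# Hedged migration sketch matching the deprecation warning above (the checkpoint
# name is illustrative):
#
#   from transformers import VideoMAEImageProcessor
#   processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")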
| 294 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = 13
__lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
__lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
__lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
__lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : str = random_attention_mask([batch_size, 4])
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference(self):
        """simple docstring"""
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3)) | 269 | 0 |
'''simple docstring'''
def encrypt( input_string: str , key: int ) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        temp_grid[num].append(character )
    grid = [''.join(row ) for row in temp_grid]
    output_string = ''.join(grid )
    return output_string
def decrypt( input_string: str , key: int ) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )] # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        temp_grid[num].append('*' )
    counter = 0
    for row in temp_grid: # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = '' # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string: str ) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string ) ): # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
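# Quick sanity check (illustrative; this is the classic 3-rail example):
#     encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
#     decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) == "WEAREDISCOVEREDFLEEATONCE"
# and bruteforce("WECRLTEERDSOEEFEAOCAIVDEN")[3] recovers the same plaintext
# without knowing the key.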
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 236 | 1 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
A__ = ["""key_proj""", """value_proj""", """query_proj"""]
A__ = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
A__ = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
A__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
A__ = old_model.bias
logger.info(F"""{attribute} is initialized""" )
A__ = True
break
elif attribute in special_keys and hasattr(UpperCAmelCase_ , """in_proj_weight""" ):
A__ = old_model.in_proj_weight.shape[0] // 3
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
A__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
A__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
A__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
A__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
A__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
A__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
A__ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
A__ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
A__ = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 335 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ):
    """simple docstring"""
    return x + 2
class A ( unittest.TestCase ):
'''simple docstring'''
    def test_evaluate_assign(self):
        """simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )
        code = """x = y"""
        state = {"""y""": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 5, """y""": 5} )
    def test_evaluate_call(self):
        """simple docstring"""
        code = """y = add_two(x)"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        """simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )
    def test_evaluate_dict(self):
        """simple docstring"""
        code = """test_dict = {'x': x, 'y': add_two(x)}"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertDictEqual(result , {"""x""": 3, """y""": 5} )
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
    def test_evaluate_expression(self):
        """simple docstring"""
        code = """x = 3\ny = 5"""
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )
    def test_evaluate_f_string(self):
        """simple docstring"""
        code = """text = f'This is x: {x}.'"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"""x""": 3, """text""": """This is x: 3."""} )
    def test_evaluate_if(self):
        """simple docstring"""
        code = """if x <= 3:\n y = 2\nelse:\n y = 5"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"""x""": 3, """y""": 2} )
        state = {"""x""": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 8, """y""": 5} )
    def test_evaluate_list(self):
        """simple docstring"""
        code = """test_list = [x, add_two(x)]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )
    def test_evaluate_name(self):
        """simple docstring"""
        code = """y = x"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3, """y""": 3} )
    def test_evaluate_subscript(self):
        """simple docstring"""
        code = """test_list = [x, add_two(x)]\ntest_list[1]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )
        code = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
    def test_evaluate_for(self):
        """simple docstring"""
        code = """x = 0\nfor i in range(3):\n x = i"""
        state = {}
        result = evaluate(code , {"""range""": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"""x""": 2, """i""": 2} )
| 305 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = """config.json"""
WEIGHTS_NAME = """diffusion_pytorch_model.bin"""
FLAX_WEIGHTS_NAME = """diffusion_flax_model.msgpack"""
ONNX_WEIGHTS_NAME = """model.onnx"""
SAFETENSORS_WEIGHTS_NAME = """diffusion_pytorch_model.safetensors"""
ONNX_EXTERNAL_WEIGHTS_NAME = """weights.pb"""
HUGGINGFACE_CO_RESOLVE_ENDPOINT = """https://huggingface.co"""
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = """diffusers_modules"""
HF_MODULES_CACHE = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
DEPRECATED_REVISION_ARGS = ["""fp16""", """non-ema"""]
TEXT_ENCODER_ATTN_MODULE = """.self_attn"""
| 360 |
"""simple docstring"""
def euclidean_distance_sqr( point1 , point2 ):
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
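# For the sample points below, the closest pair is (2, 3) and (3, 4), so the
# script should print sqrt(2): "Distance: 1.4142135623730951".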
if __name__ == "__main__":
    points = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 289 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase_ : str = False
class __A ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_image_variation(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 81 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
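# Illustration of the 6k +/- 1 check: every prime greater than 3 sits next to
# a multiple of 6 (e.g. 29 = 6*5 - 1 and 31 = 6*5 + 1), so trial division only
# needs the candidates 5, 7, 11, 13, ... up to sqrt(number).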
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums( n: int ) -> list[int]:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
                return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig( PretrainedConfig ):
    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self , vocab_size=250_880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse('''1.12''' )
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        """simple docstring"""
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-3
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizer" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
"""simple docstring"""
return 13
| 93 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare( self , dl_manager ):
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 93 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_beit'''] = ['''BeitFeatureExtractor''']
    _import_structure['''image_processing_beit'''] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_beit'''] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_beit'''] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 | '''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args
def image_grid( imgs, rows, cols ):
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img, box=(i % cols * w, i // cols * h) )
    return grid
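# Minimal usage sketch (assumes Pillow; the grid size comes from the first
# image, so all images are expected to share one size):
#     imgs = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue", "white")]
#     image_grid(imgs, rows=2, cols=2).size  # -> (128, 128)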
def generate_images( pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1))) | 145 | 0 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( number: int ) -> int:
    '''simple docstring'''
    return sum(DIGIT_FACTORIAL[d] for d in str(number ) )
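# Example: 145 is "curious" because 1! + 4! + 5! = 1 + 24 + 120 = 145; the
# only other such number is 40585, so solution() returns 145 + 40585 = 40730.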
def solution() -> int:
    '''simple docstring'''
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 242 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length: int , remainder: int , digits: list[int] , length: int ) -> int:
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 1_0
return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(1_0 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 1_0 , digits , length )
        return result
    result = 0
    for digit1 in range(1_0 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 1_0 , digits , length , )
    return result
def solution( max_power: int = 9 ) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
return result
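# Summing the per-length counts for lengths 1..9 (max_power=9) reproduces the
# known Project Euler 145 result: 608720 reversible numbers below one billion.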
if __name__ == "__main__":
print(F"""{solution() = }""")
| 242 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
_UpperCAmelCase : int = {"allegro/herbert-base-cased": 514}
_UpperCAmelCase : Tuple = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
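    # Resulting layout (illustrative): a single sequence becomes
    # [CLS] A [SEP], and a sequence pair becomes [CLS] A [SEP] B [SEP].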
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 236 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type="hybrid" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset" ) ), "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed", "" )
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection" )
    if "blocks" in name:
        name = name.replace("blocks", "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head" )
    if "scratch" in name:
        name = name.replace("scratch", "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
    if "out_conv" in name:
        name = name.replace("out_conv", "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1", "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2", "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt" )
    if "bn" in name:
        name = name.replace("bn", "batch_norm" )
    if "head" in name:
        name = name.replace("head", "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head" )
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder" )
    if ".." in name:
        name = name.replace("..", "." )
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks", "layers" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv" )
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
    return name
def read_in_q_k_v( state_dict, config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
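# Sketch of the slicing above (sizes are illustrative): a fused qkv projection
# of shape (3*H, H) is split row-wise into three (H, H) matrices; with H = 4,
# rows 0:4 become the query, rows 4:8 the key, and rows 8:12 the value.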
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 1 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
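# Illustrative run with the classic CLRS instance: values (60, 100, 120),
# weights (10, 20, 30) and capacity 50 take items 1 and 2 whole plus 2/3 of
# item 3, so fractional_knapsack returns (240.0, [1, 1, 0.6666666666666666]).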
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Dict = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr( datasets.Metric ):
    """simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Optional[int]= spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 150 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [False for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list, current_sequence: list, index: int, index_used: list
) -> None:
    """Backtracking: fix one element per recursion level; a full-length
    current_sequence is a finished permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # undo the choice before trying the next candidate
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
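# Cross-check sketch (standard library, not part of the original file): itertools
# enumerates the same n! orderings that the backtracking above prints.
from itertools import permutations

assert len(list(permutations(sequence))) == 24  # 4! orderings of [3, 1, 2, 4]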
| 246 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
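# Hedged usage sketch for one utility re-exported above (decorator form per
# accelerate's documented API; the training body is a hypothetical placeholder):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # retried with a halved batch_size whenever CUDA runs out of memory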
| 289 | 0 |
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
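# Hedged usage sketch (call signature assumed from the transformers agents/tools API):
#
#     tool = TextToSpeechTool()
#     audio = tool("Hello world")  # 1-D waveform tensor from SpeechT5 + HiFi-GAN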
| 355 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks) -> bool:
    """Return True if the regexes in qs match some contiguous window of ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # windows of ks for which every q in qs matches the corresponding string
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
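# Hedged usage sketch (assumes a Flax GPT-2-style parameter tree, which is what
# the rules above target):
#
#     partition_specs = set_partitions(model.params)  # pytree of PartitionSpec/None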
| 98 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # When used inside `as_target_processor`, forward everything to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # When used inside `as_target_processor`, pad with the tokenizer instead.
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
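# Hedged usage sketch (checkpoint name illustrative; Wav2Vec2 expects 16 kHz audio):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     batch = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     batch["labels"] = processor(text=transcript).input_ids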
| 93 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="m2m100" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=8 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : List[Any] = language_codes
lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase_ : List[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
lowercase_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(__SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , language_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Any = load_json(__SCREAMING_SNAKE_CASE )
lowercase_ : str = {v: k for k, v in self.encoder.items()}
lowercase_ : Optional[int] = spm_file
lowercase_ : Any = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
lowercase_ : List[Any] = len(self.encoder )
lowercase_ : Dict = {
self.get_lang_token(__SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
lowercase_ : Tuple = src_lang if src_lang is not None else '''en'''
lowercase_ : Optional[int] = tgt_lang
lowercase_ : Any = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase_ : Dict = num_madeup_words
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = []
lowercase_ : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = [1] * len(self.prefix_tokens )
lowercase_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : List[Any] = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : List[Any] = {}
lowercase_ : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Tuple = Path(__SCREAMING_SNAKE_CASE )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.spm_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.spm_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (str(__SCREAMING_SNAKE_CASE ), str(__SCREAMING_SNAKE_CASE ))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.lang_token_to_id[lang_token]
lowercase_ : Optional[Any] = [self.cur_lang_id]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.lang_token_to_id[lang_token]
lowercase_ : str = [self.cur_lang_id]
lowercase_ : List[str] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.get_lang_token(__SCREAMING_SNAKE_CASE )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
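# Hedged usage sketch (mirrors the upstream M2M100 API; checkpoint illustrative):
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     encoded = tokenizer("Hello world", return_tensors="pt")  # ids prefixed with __en__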
| 93 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowercase: Optional[int] = "sshleifer/bart-tiny-random"
__lowercase: Dict = "patrickvonplaten/t5-tiny-random"
@require_torch
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return AutoConfig.from_pretrained(_lowerCamelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
self.assertEqual(student.config.num_hidden_layers, 1 )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=_lowerCamelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=_lowerCamelCase )
self.assertEqual(student.config.encoder_layers, 1 )
self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
self.assertEqual(student.config.encoder_layers, 1 )
self.assertEqual(student.config.decoder_layers, 1 )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=_lowerCamelCase, d=_lowerCamelCase ) | 367 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """All major networks' cards start with one of these prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Luhn checksum: double every second digit from the right, fold two-digit
    products back to one digit, and require the total to be divisible by 10."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
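# Worked example for "4111111111111111": the doubled positions contribute
# 8 + 2 * 7 = 22, the untouched positions contribute 1 * 8 = 8, and
# 22 + 8 = 30 is divisible by 10, so the number passes the Luhn check.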
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print why a number is invalid, or confirm that it is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
 | 31 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int = 3 , UpperCamelCase : int = 1 , UpperCamelCase : int = 1 , UpperCamelCase : Optional[str] = "relu" , **UpperCamelCase : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ : str = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ : int = tf.keras.layers.ConvaD(
filters=UpperCamelCase , kernel_size=UpperCamelCase , strides=UpperCamelCase , padding="""VALID""" , groups=UpperCamelCase , use_bias=UpperCamelCase , name="""convolution""" , )
lowerCAmelCase__ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
lowerCAmelCase__ : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.convolution(self.padding(UpperCamelCase ) )
lowerCAmelCase__ : str = self.normalization(UpperCamelCase )
lowerCAmelCase__ : str = self.activation(UpperCamelCase )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self : str , UpperCamelCase : RegNetConfig , **UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : str = config.num_channels
lowerCAmelCase__ : Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = shape_list(UpperCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ : Optional[Any] = tf.transpose(UpperCamelCase , perm=(0, 2, 3, 1) )
lowerCAmelCase__ : Tuple = self.embedder(UpperCamelCase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self : Dict , UpperCamelCase : int , UpperCamelCase : int = 2 , **UpperCamelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : int = tf.keras.layers.ConvaD(
filters=UpperCamelCase , kernel_size=1 , strides=UpperCamelCase , use_bias=UpperCamelCase , name="""convolution""" )
lowerCAmelCase__ : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : tf.Tensor , UpperCamelCase : bool = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(UpperCamelCase ) , training=UpperCamelCase )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self : Any , UpperCamelCase : int , UpperCamelCase : int , **UpperCamelCase : Optional[int] ) -> str:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase , name="""pooler""" )
lowerCAmelCase__ : Union[str, Any] = [
tf.keras.layers.ConvaD(filters=UpperCamelCase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=UpperCamelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def _lowerCAmelCase ( self : Any , UpperCamelCase : Dict ) -> int:
"""simple docstring"""
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowerCAmelCase__ : List[Any] = self.pooler(UpperCamelCase )
for layer_module in self.attention:
lowerCAmelCase__ : Any = layer_module(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self : List[str] , UpperCamelCase : RegNetConfig , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int = 1 , **UpperCamelCase : Dict ) -> str:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Dict = in_channels != out_channels or stride != 1
lowerCAmelCase__ : List[Any] = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ : Dict = (
TFRegNetShortCut(UpperCamelCase , stride=UpperCamelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ : Optional[Any] = [
TFRegNetConvLayer(UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
UpperCamelCase , stride=UpperCamelCase , groups=UpperCamelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(UpperCamelCase , kernel_size=1 , activation=UpperCamelCase , name="""layer.2""" ),
]
lowerCAmelCase__ : str = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ : List[str] = layer_module(UpperCamelCase )
lowerCAmelCase__ : int = self.shortcut(UpperCamelCase )
hidden_state += residual
lowerCAmelCase__ : List[str] = self.activation(UpperCamelCase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self : Any , UpperCamelCase : RegNetConfig , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int = 1 , **UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Any = in_channels != out_channels or stride != 1
lowerCAmelCase__ : Optional[Any] = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ : str = (
TFRegNetShortCut(UpperCamelCase , stride=UpperCamelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
lowerCAmelCase__ : Optional[int] = [
TFRegNetConvLayer(UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
UpperCamelCase , stride=UpperCamelCase , groups=UpperCamelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(UpperCamelCase , kernel_size=1 , activation=UpperCamelCase , name="""layer.3""" ),
]
lowerCAmelCase__ : str = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self : str , UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : int = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ : Dict = layer_module(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.shortcut(UpperCamelCase )
hidden_state += residual
lowerCAmelCase__ : str = self.activation(UpperCamelCase )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self : Any , UpperCamelCase : RegNetConfig , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , **UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Optional[int] = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowerCAmelCase__ : Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(UpperCamelCase , UpperCamelCase , UpperCamelCase , stride=UpperCamelCase , name="""layers.0""" ),
*[layer(UpperCamelCase , UpperCamelCase , UpperCamelCase , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ : Optional[int] = layer_module(UpperCamelCase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self : Any , UpperCamelCase : RegNetConfig , **UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
lowerCAmelCase__ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCamelCase , UpperCamelCase , UpperCamelCase , depth=UpperCamelCase , name=f"""stages.{i+1}""" ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : tf.Tensor , UpperCamelCase : bool = False , UpperCamelCase : bool = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
lowerCAmelCase__ : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ : List[str] = hidden_states + (hidden_state,)
lowerCAmelCase__ : Optional[Any] = stage_module(UpperCamelCase )
if output_hidden_states:
lowerCAmelCase__ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase , hidden_states=UpperCamelCase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self : Any , UpperCamelCase : Dict , **UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Optional[int] = config
lowerCAmelCase__ : Any = TFRegNetEmbeddings(UpperCamelCase , name="""embedder""" )
lowerCAmelCase__ : Dict = TFRegNetEncoder(UpperCamelCase , name="""encoder""" )
lowerCAmelCase__ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase , name="""pooler""" )
@unpack_inputs
def _lowerCAmelCase ( self : Dict , UpperCamelCase : tf.Tensor , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowerCAmelCase__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Dict = self.embedder(UpperCamelCase , training=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.encoder(
UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase , training=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = encoder_outputs[0]
lowerCAmelCase__ : List[Any] = self.pooler(UpperCamelCase )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ : Optional[Any] = tf.transpose(UpperCamelCase , perm=(0, 3, 1, 2) )
lowerCAmelCase__ : Optional[Any] = tf.transpose(UpperCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ : int = tuple([tf.transpose(UpperCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
_A = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_A = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self : Optional[int] , UpperCamelCase : RegNetConfig , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
super().__init__(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : Tuple = TFRegNetMainLayer(UpperCamelCase , name="""regnet""" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : tf.Tensor , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[int]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Union[str, Any] = self.regnet(
pixel_values=UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase , training=UpperCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self : Union[str, Any] , UpperCamelCase : RegNetConfig , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().__init__(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = config.num_labels
lowerCAmelCase__ : str = TFRegNetMainLayer(UpperCamelCase , name="""regnet""" )
# classification head
lowerCAmelCase__ : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : tf.Tensor = None , UpperCamelCase : tf.Tensor = None , UpperCamelCase : bool = None , UpperCamelCase : bool = None , UpperCamelCase : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
lowerCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Union[str, Any] = self.regnet(
UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase , training=UpperCamelCase )
lowerCAmelCase__ : int = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ : List[str] = self.classifier[0](UpperCamelCase )
lowerCAmelCase__ : List[Any] = self.classifier[1](UpperCamelCase )
lowerCAmelCase__ : Optional[int] = None if labels is None else self.hf_compute_loss(labels=UpperCamelCase , logits=UpperCamelCase )
if not return_dict:
lowerCAmelCase__ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states )
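# Hedged usage sketch (standard transformers inference pattern; checkpoint taken
# from the docstring constants above, `image` is a hypothetical PIL image):
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     predicted = int(tf.math.argmax(model(**inputs).logits, axis=-1))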
| 242 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Dict , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 2_55 , UpperCamelCase : bool = True , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , **UpperCamelCase : Dict , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : int = size if size is not None else {"""shortest_edge""": 2_56}
lowerCAmelCase__ : Union[str, Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowerCAmelCase__ : List[str] = get_size_dict(UpperCamelCase , param_name="""crop_size""" )
lowerCAmelCase__ : Union[str, Any] = do_resize
lowerCAmelCase__ : str = size
lowerCAmelCase__ : str = do_center_crop
lowerCAmelCase__ : Tuple = crop_size
lowerCAmelCase__ : Union[str, Any] = resample
lowerCAmelCase__ : Any = do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor
lowerCAmelCase__ : Dict = offset
lowerCAmelCase__ : Optional[int] = do_normalize
lowerCAmelCase__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" in size:
lowerCAmelCase__ : int = get_resize_output_image_size(UpperCamelCase , size["""shortest_edge"""] , default_to_square=UpperCamelCase )
elif "height" in size and "width" in size:
lowerCAmelCase__ : Union[str, Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : int = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = image.astype(np.floataa )
if offset:
lowerCAmelCase__ : Tuple = image - (scale / 2)
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase__ : Optional[Any] = to_numpy_array(UpperCamelCase )
if do_resize:
lowerCAmelCase__ : List[str] = self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase )
if do_center_crop:
lowerCAmelCase__ : List[str] = self.center_crop(UpperCamelCase , size=UpperCamelCase )
if do_rescale:
lowerCAmelCase__ : Optional[int] = self.rescale(image=UpperCamelCase , scale=UpperCamelCase , offset=UpperCamelCase )
if do_normalize:
lowerCAmelCase__ : Tuple = self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase )
lowerCAmelCase__ : List[str] = to_channel_dimension_format(UpperCamelCase , UpperCamelCase )
return image
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase : Dict , ) -> PIL.Image.Image:
"""simple docstring"""
lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase__ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : Dict = offset if offset is not None else self.offset
lowerCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : List[Any] = size if size is not None else self.size
lowerCAmelCase__ : Tuple = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""crop_size""" )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowerCAmelCase__ : int = make_batched(UpperCamelCase )
lowerCAmelCase__ : str = [
[
self._preprocess_image(
image=UpperCamelCase , do_resize=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , do_center_crop=UpperCamelCase , crop_size=UpperCamelCase , do_rescale=UpperCamelCase , rescale_factor=UpperCamelCase , offset=UpperCamelCase , do_normalize=UpperCamelCase , image_mean=UpperCamelCase , image_std=UpperCamelCase , data_format=UpperCamelCase , )
for img in video
]
for video in videos
]
lowerCAmelCase__ : Dict = {"""pixel_values""": videos}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
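# Hedged usage sketch (a video is a list of frames; `frames` is a hypothetical
# list of PIL images or numpy arrays):
#
#     image_processor = VivitImageProcessor()
#     pixel_values = image_processor(frames, return_tensors="pt")["pixel_values"]
#     # roughly (batch, num_frames, channels, height, width)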
| 242 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
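# Minimal usage sketch (illustrative; not from the original file):
#   config = ConvBertConfig(hidden_size=256, num_hidden_layers=4)
#   config.save_pretrained("./convbert-tiny")  # writes config.json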
| 369 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER; see BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare_data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both validation and test."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
_A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_A = NERTransformer.add_model_specific_args(parser, os.getcwd())
_A = parser.parse_args()
_A = NERTransformer(args)
_A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_A = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
_A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
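# Illustrative invocation (script name and paths are placeholders, not from this file):
#   python run_ner_pl.py --data_dir ./data --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict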
| 212 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
) | 135 | """simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Give the new context after removing <html> tokens, with the answer re-indexed accordingly."""
    answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case = example['document']['tokens']
snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # consecutive windows overlap by doc_stride - q_len tokens
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # rebase the answer span onto this window
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples to rebalance the data
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
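# Each line written above is one JSON record, roughly of the form (illustrative):
#   {"input_ids": [...], "start_token": 67, "end_token": 70, "category": 1}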
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 150 | 0 |
def circle_sort(collection: list) -> list:
    """Sort a mutable ordered collection in place, ascending, and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
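# Complexity note: each call of circle_sort_util makes one O(n) pass of cross
# comparisons plus two half-size recursive calls, and the outer loop repeats
# until a full pass makes no swap, giving roughly O(n log n) work per pass.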
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = input("Enter numbers separated by a comma:\n").strip()
_SCREAMING_SNAKE_CASE : Optional[Any] = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 357 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_SCREAMING_SNAKE_CASE : str = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
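# Caveat: Indeed's HTML changes frequently, so the selectors above
# ("organicJob", "jobTitle", "company") may need updating against the live page.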
| 213 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def a__ ( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """custom_cache"""
__SCREAMING_SNAKE_CASE = """custom_extracted_dir"""
__SCREAMING_SNAKE_CASE = tmp_path / """custom_extracted_path"""
if default_extracted:
__SCREAMING_SNAKE_CASE = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a__ ) )
__SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__SCREAMING_SNAKE_CASE = xz_file
__SCREAMING_SNAKE_CASE = (
DownloadConfig(extract_compressed_file=a__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a__ )
)
__SCREAMING_SNAKE_CASE = cached_path(a__ , download_config=a__ )
assert Path(a__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a__ )
def a__ ( ):
"""simple docstring"""
with pytest.raises(a__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a__ )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a__ ):
http_get("""https://huggingface.co""" , temp_file=a__ )
with pytest.raises(a__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a__ )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a__ )
with pytest.raises(a__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a__ )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a__ )
with pytest.raises(a__ ):
fsspec_head("""s3://huggingface.co""" )
| 267 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
    test_xformers_attention = False  # attribute name assumed; the obfuscated source only showed a bare flag

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 98 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
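# Shape sketch for the forward pass above: given tokens of shape (batch, seq_len) and a
# mask of the same shape, it returns (hidden_states of shape (batch, seq_len, d_model),
# encoder_inputs_mask) unchanged in the second slot.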
| 273 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 273 | 1 |
def sum_of_digits(n: int) -> int:
    """Find the sum of the digits of a number, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of the digits of a number, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of the digits of a number via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark all three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
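# Example output line from benchmark() (illustrative; timing varies by machine):
#   sum_of_digits(262144)                                   = 19 -- 0.1... seconds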
| 39 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 31 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """Forward `audio` inputs to the feature extractor and `text` inputs to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and `labels` with the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 358 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the logical OR of the two input bits."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
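# Truth table exercised above: OR(0,0)=0, OR(0,1)=1, OR(1,0)=1, OR(1,1)=1.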
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 164 | 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """
    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
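# Design note: sorting indices by value/weight ratio and taking a fractional piece of
# the first item that no longer fits is exactly what makes the greedy choice optimal
# for the fractional (as opposed to 0/1) knapsack problem.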
if __name__ == "__main__":
import doctest
doctest.testmod()
| 215 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
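# Example (illustrative; "MY_FLAG" is a placeholder key): with os.environ["MY_FLAG"] = "1",
# parse_flag_from_env("MY_FLAG") returns True, since strtobool("1") == 1.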
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="no" ) -> List[str]:
lowerCAmelCase__ : Optional[int] = os.environ.get(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) )
return value | 212 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper( self ):
        """simple docstring"""
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        """simple docstring"""
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        """simple docstring"""
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 1_5
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        """simple docstring"""
        vocab_size = 2_0
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=2_0 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        """simple docstring"""
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=2_0 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        """simple docstring"""
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        """simple docstring"""
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 17 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
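def _euler_denoising_sketch():
    # Illustrative denoising loop, not one of the original tests; the zeros
    # tensor stands in for a real noise-prediction model such as a UNet.
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1_0_0_0 )
    scheduler.set_timesteps(1_0 )
    sample = torch.randn(1 , 3 , 8 , 8 ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample , t )
        noise_pred = torch.zeros_like(model_input )  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred , t , sample ).prev_sample
    return sample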
class EulerDiscreteSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
    def test_full_loop_device( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_device_karras_sigmas( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 17 | 1
"""simple docstring"""
import argparse
import copy
def generate_neighbours( path ):
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
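# Illustrative input for the parser above (file contents are made up): each line
# is "node_a node_b distance", so a file holding
#
#     a b 20
#     a c 18
#     b c 10
#
# yields {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']],
# 'c': [['a', '18'], ['b', '10']]}.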
def generate_first_solution( path , dict_of_neighbours ):
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
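# Example invocation (file name and parameters are illustrative): the input file
# holds one "node_a node_b distance" edge per line, and its first character is
# read as the start node.
#
#     python tabu_search.py -f tabudata.txt -i 100 -s 5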
| 202 | """simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args ,**kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        '''simple docstring'''
        object_detector = pipeline(
            'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples
    def run_pipeline_test( self ,object_detector ,examples ):
        '''simple docstring'''
        outputs = object_detector(examples[0] ,threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n ,0 )
        self.assertEqual(
            outputs ,[
                {
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                }
                for i in range(n )
            ] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        object_detector = pipeline(
            'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=0.64 ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] ,)
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] ,threshold=0.64 ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] ,)
@require_torch
@slow
    def test_large_model_pt( self ):
        '''simple docstring'''
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] ,)
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf( self ):
'''simple docstring'''
pass
@require_torch
@slow
    def test_threshold( self ):
        '''simple docstring'''
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=threshold ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] ,)
@require_torch
@slow
    def test_top_k( self ):
        '''simple docstring'''
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,top_k=top_k ,)
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] ,)
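# Illustrative standalone usage, outside the test harness above; the task name,
# image URL and labels are the ones already exercised by the tests:
#
#     detector = pipeline('zero-shot-object-detection')
#     detector(
#         'http://images.cocodataset.org/val2017/000000039769.jpg',
#         candidate_labels=['cat', 'remote', 'couch'],
#     )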
| 213 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule( nn.Module ):
    def __init__( self , in_channels : int , out_channels : int , kernel_size : Union[int, Tuple[int, int]] , padding : Union[int, Tuple[int, int], str] = 0 , bias : bool = False , dilation : Union[int, Tuple[int, int]] = 1 , ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward( self , input : torch.Tensor ) -> torch.Tensor:
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock( nn.Module ):
    def __init__( self , pool_scale : int , in_channels : int , channels : int ) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward( self , input : torch.Tensor ) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule( nn.Module ):
    def __init__( self , pool_scales : Tuple[int, ...] , in_channels : int , channels : int , align_corners : bool ) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x : torch.Tensor ) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead( nn.Module ):
    def __init__( self , config , in_channels ) -> None:
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ) -> None:
        self.apply(self._init_weights )
    def _init_weights( self , module ) -> None:
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ) -> torch.Tensor:
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states : torch.Tensor ) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='bilinear' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead( nn.Module ):
    def __init__( self , config , in_index : int = 2 , kernel_size : int = 3 , dilation : Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ) -> None:
        self.apply(self._init_weights )
    def _init_weights( self , module ) -> None:
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self , encoder_hidden_states : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel( PreTrainedModel ):
    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ) -> None:
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self ) -> None:
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ) -> None:
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
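# Illustrative inference sketch (not part of the original module); the checkpoint
# name comes from the archive list above, and the use of AutoImageProcessor for
# preprocessing is an assumption:
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
#     model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
#     inputs = processor(images=image, return_tensors='pt')
#     logits = model(**inputs).logits  # shape (batch_size, num_labels, height, width)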
| 356 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests( TestCasePlus ):
@require_torch
    def test_offline_mode( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet( self ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
    def test_offline_model_dynamic_model( self ):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
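# Illustrative shell usage (not one of the tests above): the same flag the tests
# set through `env` can be exported before launching any script, after which
# `from_pretrained` resolves everything from the local cache:
#
#     TRANSFORMERS_OFFLINE=1 python my_script.py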
| 120 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=int , default=128 )
    parser.add_argument('''--data_dir''' , type=str )
    parser.add_argument('''--save_path''' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
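# Example invocation (the script path, paths and model name are illustrative):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn \
#         --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed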
| 273 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
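def _ctrl_generation_sketch():
    # Illustrative sketch, not part of the tester below: greedy generation with
    # a randomly initialized model using the same tiny config values the tests
    # exercise. The output is meaningless; it only demonstrates the API.
    config = CTRLConfig(vocab_size=9_9 , n_embd=3_2 , n_layer=5 , n_head=4 , n_positions=5_1_2 )
    model = CTRLLMHeadModel(config )
    input_ids = ids_tensor([1, 4] , config.vocab_size )
    return model.generate(input_ids , max_length=8 , do_sample=False )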
class CTRLModelTester :
    def __init__( self , parent , batch_size=1_4 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
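# --- Hedged usage sketch (an addition) mirroring test_lm_generate_ctrl above. The
# "ctrl" checkpoint is a very large download, so this is a helper to call manually,
# for illustration only.
def _ctrl_generation_demo():
    from transformers import CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl" )
    model = CTRLLMHeadModel.from_pretrained("ctrl" )
    prompt = tokenizer("Legal the president is" , return_tensors="pt" ).input_ids
    output_ids = model.generate(prompt , do_sample=False , max_length=20 )
    return tokenizer.decode(output_ids[0] )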
| 273 | 1 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.''' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    @property
    def num_rows(self) -> int:
        return len(self.rows )

    @property
    def num_columns(self) -> int:
        return len(self.rows[0] )

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )

    def is_invertable(self) -> bool:
        return bool(self.determinant() )

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows )

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(value ) for value in row] ) + '''.]'''
                    for row in self.rows
                ] )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('''Row must be a list containing all ints and/or floats''' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            '''Column must be a list containing all ints and/or floats''' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''' )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other , int ):
            raise TypeError('''A Matrix can only be raised to the power of an int''' )
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power''' )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
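# --- Hedged usage sketch (an addition) for the Matrix class above. Note that scalar
# multiplication truncates entries with int(), so inverse() of an integer matrix is
# floored toward zero; determinant/cofactor arithmetic is exact.
m = Matrix([[4, 7], [2, 6]])
assert m.determinant() == 10
assert m.get_minor(0, 0) == 6 and m.get_cofactor(0, 1) == -2
assert repr(m.adjugate()) == "[[6, -7], [-2, 4]]"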
| 65 |
from __future__ import annotations
def carrier_concentration( electron_conc , hole_conc , intrinsic_conc , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
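# --- Worked example (an added sketch): by the mass-action law n * p = n_i**2, an
# electron concentration of 1e12 with intrinsic concentration 1e10 implies a hole
# concentration of 1e8, which is what the function above returns.
_name, _value = carrier_concentration(electron_conc=1e12, hole_conc=0, intrinsic_conc=1e10)
assert _name == "hole_conc" and abs(_value - 1e8) < 1e-3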
| 65 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextVaConfig(BackboneConfigMixin , PretrainedConfig):
    model_type = '''convnextv2'''

    def __init__(self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
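# --- Hedged usage sketch (an addition): the defaults produced by the config above,
# which mirrors transformers' ConvNextV2Config.
_cfg = ConvNextVaConfig()
assert _cfg.depths == [3, 3, 9, 3]
assert _cfg.hidden_sizes == [96, 192, 384, 768]
assert _cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]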
| 5 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """french fries"""
lowercase__ = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = [inputs["""prompt"""]] * 2
        image = np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
        image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
        image = image / 2 + 0.5
        image = image.permute(0 , 3 , 1 , 2 )
        inputs["""image"""] = image.repeat(2 , 1 , 1 , 1 )
        image = sd_pipe(**inputs ).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0]
lowercase__ = components["""vae"""]
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase__ )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        image = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
        inputs = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state( self ):
        '''simple docstring'''
        number_of_steps = 0

        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_pix2pix_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = inputs["""image"""].resize((504, 504) )
lowercase__ = """timbrooks/instruct-pix2pix"""
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = pipe(**lowerCamelCase__ )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
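# --- Hedged end-to-end sketch (an addition) of the pipeline exercised above. The
# checkpoint is a heavy download, so this is a helper to call manually, not a test.
def _instruct_pix2pix_demo():
    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
    pipe = pipe.to(torch_device )
    src = load_image(
        """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
    return pipe("""turn him into a cyborg""" , image=src , num_inference_steps=10 , image_guidance_scale=1.0 ).images[0]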
| 164 | 0 |
from typing import Any
class Node:
    def __init__( self , data: Any ):
        self.data = data
        self.next = None


class LinkedList:
    def __init__( self ):
        self.head = None

    def print_list( self ):
        temp = self.head
        while temp is not None:
            print(temp.data , end=""" """ )
            temp = temp.next
        print()

    def push( self , new_data: Any ):
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_1 , node_data_2 ):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
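    # --- Hedged extra check (an addition): swap_nodes exchanges node *data*, not
    # links, and is a no-op when either value is absent from the list.
    ll.swap_nodes(1, 99)  # 99 is not present -> list unchanged
    ll.print_list()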
| 359 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
UpperCamelCase__ = "sshleifer/mar_enro_6_3_student"
class __SCREAMING_SNAKE_CASE ( _a ):
    def setUp( self ):
        super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=True , )
        self.data_dir = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
UpperCamelCase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
UpperCamelCase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
UpperCamelCase__ = bash_script.replace(__lowerCAmelCase , str(__lowerCAmelCase ) )
UpperCamelCase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCamelCase__ = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCamelCase__ = ["""finetune.py"""] + bash_script.split() + args
with patch.object(__lowerCAmelCase , """argv""" , __lowerCAmelCase ):
UpperCamelCase__ = argparse.ArgumentParser()
UpperCamelCase__ = pl.Trainer.add_argparse_args(__lowerCAmelCase )
UpperCamelCase__ = SummarizationModule.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = main(__lowerCAmelCase )
# Check metrics
UpperCamelCase__ = load_json(model.metrics_save_path )
UpperCamelCase__ = metrics["""val"""][0]
UpperCamelCase__ = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __lowerCAmelCase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCamelCase__ = os.listdir(__lowerCAmelCase )
UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
UpperCamelCase__ = os.path.join(args.output_dir , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCamelCase__ = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class __SCREAMING_SNAKE_CASE ( _a ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
UpperCamelCase__ = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
UpperCamelCase__ = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
UpperCamelCase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
UpperCamelCase__ = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
UpperCamelCase__ = bash_script.replace(__lowerCAmelCase , str(__lowerCAmelCase ) )
UpperCamelCase__ = self.get_auto_remove_tmp_dir()
UpperCamelCase__ = bash_script.replace("""--fp16""" , """""" )
UpperCamelCase__ = 6
UpperCamelCase__ = (
["""distillation.py"""]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"""--gpus=1""",
"""--learning_rate=1e-3""",
f"""--num_train_epochs={epochs}""",
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(__lowerCAmelCase , """argv""" , __lowerCAmelCase ):
UpperCamelCase__ = argparse.ArgumentParser()
UpperCamelCase__ = pl.Trainer.add_argparse_args(__lowerCAmelCase )
UpperCamelCase__ = SummarizationDistiller.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
UpperCamelCase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCamelCase__ = distill_main(__lowerCAmelCase )
# Check metrics
UpperCamelCase__ = load_json(model.metrics_save_path )
UpperCamelCase__ = metrics["""val"""][0]
UpperCamelCase__ = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __lowerCAmelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCamelCase__ = os.listdir(__lowerCAmelCase )
UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
UpperCamelCase__ = os.path.join(args.output_dir , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCamelCase__ = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
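# --- Hedged sketch (an addition) of the argv-patching pattern both tests above rely
# on: feed a command line to an argparse-based entry point without spawning a shell.
def _argv_patch_demo():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--gpus" , type=int , default=0 )
    return parser.parse_args().gpus


with patch.object(sys , "argv" , ["prog", "--gpus", "1"] ):
    assert _argv_patch_demo() == 1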
| 87 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self, batch_size: int, length: int ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def _lowercase ( self : Union[str, Any] ):
__lowercase = None
__lowercase = 2_0
__lowercase = self._get_uniform_logits(batch_size=2, length=UpperCAmelCase__ )
# tweak scores to not be uniform anymore
__lowercase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowercase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowercase = jax.nn.softmax(UpperCAmelCase__, axis=-1 )
__lowercase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowercase = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__, scores.copy(), cur_len=UpperCAmelCase__ ), axis=-1 )
__lowercase = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__, scores.copy(), cur_len=UpperCAmelCase__ ), axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min() )
def _lowercase ( self : Union[str, Any] ):
__lowercase = None
__lowercase = 1_0
__lowercase = 2
# create ramp distribution
__lowercase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :], (batch_size, vocab_size) ).copy()
__lowercase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowercase = FlaxTopKLogitsWarper(3 )
__lowercase = top_k_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist(), 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist(), 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowercase = 5
__lowercase = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3 )
__lowercase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :], (batch_size, length) ).copy()
__lowercase = top_k_warp_safety_check(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(), [2, 2] )
def _lowercase ( self : Any ):
__lowercase = None
__lowercase = 1_0
__lowercase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowercase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowercase = FlaxTopPLogitsWarper(0.8 )
__lowercase = np.exp(top_p_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowercase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-3 ) )
# check edge cases with negative and extreme logits
__lowercase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :], (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowercase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__lowercase = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0 )
__lowercase = top_p_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(), [3, 2] )
def _lowercase ( self : List[Any] ):
__lowercase = 2_0
__lowercase = 4
__lowercase = 0
__lowercase = FlaxMinLengthLogitsProcessor(min_length=1_0, eos_token_id=UpperCAmelCase__ )
# check that min length is applied at length 5
__lowercase = ids_tensor((batch_size, 2_0), vocab_size=2_0 )
__lowercase = 5
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = min_dist_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = 1_5
__lowercase = min_dist_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def _lowercase ( self : Tuple ):
__lowercase = 2_0
__lowercase = 4
__lowercase = 0
__lowercase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
__lowercase = ids_tensor((batch_size, 1), vocab_size=2_0 )
__lowercase = 1
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = logits_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowercase = 3
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = logits_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def _lowercase ( self : List[Any] ):
__lowercase = 2_0
__lowercase = 4
__lowercase = 0
__lowercase = 5
__lowercase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__, eos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowercase = ids_tensor((batch_size, 4), vocab_size=2_0 )
__lowercase = 4
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = logits_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowercase = 3
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = logits_processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def _lowercase ( self : Union[str, Any] ):
__lowercase = 4
__lowercase = 1_0
__lowercase = 1_5
__lowercase = 2
__lowercase = 1
__lowercase = 1_5
# dummy input_ids and scores
__lowercase = ids_tensor((batch_size, sequence_length), UpperCAmelCase__ )
__lowercase = input_ids.copy()
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = scores.copy()
# instantiate all dist processors
__lowercase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase = FlaxTopKLogitsWarper(3 )
__lowercase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowercase = FlaxMinLengthLogitsProcessor(min_length=1_0, eos_token_id=UpperCAmelCase__ )
__lowercase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
__lowercase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__, eos_token_id=UpperCAmelCase__ )
__lowercase = 1_0
# no processor list
__lowercase = temp_dist_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = top_k_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = top_p_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = min_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = bos_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = eos_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
# with processor list
__lowercase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowercase = processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
def _lowercase ( self : List[Any] ):
__lowercase = 4
__lowercase = 1_0
__lowercase = 1_5
__lowercase = 2
__lowercase = 1
__lowercase = 1_5
# dummy input_ids and scores
__lowercase = ids_tensor((batch_size, sequence_length), UpperCAmelCase__ )
__lowercase = input_ids.copy()
__lowercase = self._get_uniform_logits(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = scores.copy()
# instantiate all dist processors
__lowercase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase = FlaxTopKLogitsWarper(3 )
__lowercase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowercase = FlaxMinLengthLogitsProcessor(min_length=1_0, eos_token_id=UpperCAmelCase__ )
__lowercase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
__lowercase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__, eos_token_id=UpperCAmelCase__ )
__lowercase = 1_0
# no processor list
def run_no_processor_list(UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any ):
__lowercase = temp_dist_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = top_k_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = top_p_warp(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = min_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = bos_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
__lowercase = eos_dist_proc(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
return scores
# with processor list
def run_processor_list(UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int] ):
__lowercase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowercase = processor(UpperCAmelCase__, UpperCAmelCase__, cur_len=UpperCAmelCase__ )
return scores
__lowercase = jax.jit(UpperCAmelCase__ )
__lowercase = jax.jit(UpperCAmelCase__ )
__lowercase = jitted_run_no_processor_list(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = jitted_run_processor_list(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
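# --- Hedged sketch (an addition) of the property the list tests above verify: a
# FlaxLogitsProcessorList applies its members in order, matching stepwise use.
def _processor_list_demo():
    ids = ids_tensor((1, 4), vocab_size=20)
    scores = jnp.ones((1, 10))
    chained = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3)]
    )(ids, scores, cur_len=4)
    stepwise = FlaxTopKLogitsWarper(3)(
        ids, FlaxTemperatureLogitsWarper(0.5)(ids, scores, cur_len=4), cur_len=4
    )
    return jnp.allclose(chained, stepwise)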
| 17 |
"""simple docstring"""
import baseaa
def baseaa_encode(string: str) -> bytes:
    '''simple docstring'''
    return baseaa.baaencode(string.encode("utf-8"))


def baseaa_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return baseaa.baadecode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
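# --- Hedged equivalence check (an addition): the helpers above are thin wrappers
# over the stdlib Base85 codec.
import base64

assert base64.b85decode(base64.b85encode(b'Hello World!')) == b'Hello World!'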
| 17 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
_snake_case = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
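# --- Hedged usage sketch (an addition): the class above is transformers'
# GPT2TokenizerFast; the public "gpt2" vocab is downloaded on first use, so this is
# a helper to call manually.
def _tokenizer_demo():
    tok = GPTaTokenizerFast.from_pretrained("gpt2")
    return tok("hello world").input_ids  # ids depend on the checkpoint's merges/vocab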
| 371 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
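# For example, with i == 0 the loop above yields pairs such as
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# and in the base-model case the "vit." prefix is stripped from each target.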
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
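# Shape sketch: timm stores the fused projection as (3 * hidden_size, hidden_size),
# so for hidden_size = 768 the slices above take rows 0-767 (query), 768-1535 (key)
# and 1536-2303 (value); the bias is split the same way along its single dimension.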
def remove_classification_head_(state_dict):
    """Drop the timm classification head when converting a base (headless) model."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val
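# e.g. rename_key(state_dict, "cls_token", "vit.embeddings.cls_token") moves the
# timm cls-token tensor under its transformers embedding name.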
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy the weights of a timm ViT/DeiT checkpoint into a transformers ViT model."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    # e.g. "vit_base_patch16_224" parses to patch_size == 16 and image_size == 224;
    # "..._in21k" names use shifted offsets to skip the "_in21k" suffix
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
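# A minimal invocation sketch (the script filename is an assumption; pass any
# timm ViT/DeiT checkpoint name that the size-parsing logic above supports):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224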
| 300 | 0 |