| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86-54.5k chars) | int64 (0-371) | string (87-49.2k chars) | int64 (0-349) | int64 (0-1) |
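Each row pairs a `code` sample with a `style_context` sample, the integer style ids of both, and a binary `label`. As a minimal sketch of how rows with this schema could be inspected with the `datasets` library (the dataset's actual repository id is not given in this dump, so `your-org/code-style-pairs` below is only a placeholder):

```python
from datasets import load_dataset

# NOTE: placeholder repo id -- the real dataset path is not specified in this dump.
ds = load_dataset("your-org/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # preview the first code sample
```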
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
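For orientation, the word-level/subword-level split these tests exercise can be seen directly on the same checkpoint the tests load. A minimal sketch (requires the MeCab extras, e.g. `fugashi` and `ipadic`, to be installed):

```python
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))  # MeCab word split, then WordPiece subwords
```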
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
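A minimal sketch of driving this legacy filesystem by hand; `squad` is only an illustrative public dataset id, and the example assumes `dataset_info` returns the sibling file list that `_get_dirs` caches:

```python
from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("squad")  # illustrative dataset id
fs = HfFileSystem(repo_info=repo_info, token=None)

print(fs.ls(""))             # top-level files and directories of the repo
print(fs.info("README.md"))  # e.g. {'name': 'README.md', 'size': None, 'type': 'file'}
```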
def count_divisors(n):
    """Count the divisors of ``n`` from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2  # the remaining factor is prime, contributing (1 + 1) divisors
    return n_divisors


def solution():
    """Return the first triangular number with more than 500 divisors (Project Euler 12)."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
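A quick sanity check of `count_divisors` (28 factors as 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors):

```python
assert count_divisors(1) == 1
assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
```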
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids)).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids)).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids)).logits

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
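The `test_model_rope_scaling` case above toggles RoPE scaling purely through the config. A minimal sketch of the same knob on a tiny, randomly initialized model (the sizes here are arbitrary, chosen only to keep construction cheap):

```python
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    rope_scaling={"type": "linear", "factor": 2.0},  # same dict shape the test assigns
)
model = LlamaModel(config)  # random weights; positions are now interpolated by the scaling factor
```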
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
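For context, a minimal sketch of the round trip these tests exercise, pairing a BERT tokenizer with a ViT image processor in one processor (the checkpoints are illustrative; any compatible pair works):

```python
import numpy as np
from PIL import Image
from transformers import BertTokenizer, VisionTextDualEncoderProcessor, ViTImageProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy black image
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']
```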
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the formatted list of models supporting the task of a given guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check, and optionally update, the auto-generated model list of a given task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
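In other words: running `python utils/check_task_guides.py` from the repo root fails whenever a guide's generated model list is stale, while `python utils/check_task_guides.py --fix_and_overwrite` rewrites the generated tips in place; the error message above points readers at `make fix-copies`, which wraps this overwrite mode.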
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the module imports without PIL installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
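A minimal sketch of the pipeline these tests cover; `facebook/sam-vit-base` is a smaller sibling of the checkpoint used above, swapped in here only to keep the example light:

```python
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64)
print(len(outputs["masks"]), outputs["scores"][:3])
```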
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 138 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCAmelCase : Optional[int] = model_type_to_module_name(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = importlib.import_module(f""".{module_name}""" , "transformers.models" )
try:
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE , "__name__" , SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCAmelCase : Optional[int] = importlib.import_module("transformers" )
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return None
def a__ ( SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
lowerCAmelCase : Dict = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as reader:
return json.load(SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(snake_case__ )
def lowercase__ ( cls , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = kwargs.pop("config" , snake_case__ )
lowerCAmelCase : List[str] = kwargs.pop("trust_remote_code" , snake_case__ )
lowerCAmelCase : List[Any] = True
lowerCAmelCase , lowerCAmelCase : Tuple = ImageProcessingMixin.get_image_processor_dict(snake_case__ , **snake_case__ )
lowerCAmelCase : int = config_dict.get("image_processor_type" , snake_case__ )
lowerCAmelCase : str = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
lowerCAmelCase : Dict = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowerCAmelCase : str = config_dict.pop("feature_extractor_type" , snake_case__ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
lowerCAmelCase : List[str] = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowerCAmelCase : List[Any] = config_dict["auto_map"]["AutoFeatureExtractor"]
lowerCAmelCase : List[Any] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
            # It could be in `config.image_processor_type`
lowerCAmelCase : str = getattr(snake_case__ , "image_processor_type" , snake_case__ )
if hasattr(snake_case__ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
lowerCAmelCase : Union[str, Any] = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
lowerCAmelCase : Union[str, Any] = image_processor_class_from_name(snake_case__ )
lowerCAmelCase : List[Any] = image_processor_auto_map is not None
lowerCAmelCase : int = image_processor_class is not None or type(snake_case__ ) in IMAGE_PROCESSOR_MAPPING
lowerCAmelCase : int = resolve_trust_remote_code(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if has_remote_code and trust_remote_code:
lowerCAmelCase : List[Any] = get_class_from_dynamic_module(
snake_case__ , snake_case__ , **snake_case__ )
lowerCAmelCase : str = kwargs.pop("code_revision" , snake_case__ )
if os.path.isdir(snake_case__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(snake_case__ ) in IMAGE_PROCESSOR_MAPPING:
lowerCAmelCase : Optional[Any] = IMAGE_PROCESSOR_MAPPING[type(snake_case__ )]
return image_processor_class.from_dict(snake_case__ , **snake_case__ )
raise ValueError(
f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ ):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(snake_case__ , snake_case__ )
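# Editor's usage sketch (hedged, not part of the module): resolving a processor by checkpoint
# name goes through IMAGE_PROCESSOR_MAPPING above; the checkpoint id below is illustrative.
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   # -> a ViTImageProcessor instance, per the "vit" entry of IMAGE_PROCESSOR_MAPPING_NAMES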
| 108 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_disk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
print("moving disk from" , SCREAMING_SNAKE_CASE , "to" , SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = int(input("Height of hanoi: " ).strip() )
move_tower(SCREAMING_SNAKE_CASE , "A" , "B" , "C" )
if __name__ == "__main__":
main()
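# Editor's note (hedged): assuming the standard Hanoi recursion (move n - 1 disks to the spare
# pole, move the largest disk, move the n - 1 disks back on top), a tower of height n takes
# 2**n - 1 moves; for height 2 with poles A, B, C the printed moves would be
# A -> C, A -> B, C -> B.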
| 108 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = AlbertConfig.from_json_file(A__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__UpperCamelCase :int = AlbertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(A__ , A__ , A__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
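# Editor's sketch (hedged): a typical invocation of this conversion script; the script
# filename and all paths are hypothetical.
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin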
| 351 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__lowercase = logging.get_logger(__name__)
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase=False , __lowercase=False , __lowercase=6.0 , __lowercase=None , __lowercase=False , __lowercase=False , __lowercase=None , __lowercase="fp4" , __lowercase=False , **__lowercase , ) -> Tuple:
__UpperCamelCase :List[str] = load_in_abit
__UpperCamelCase :Union[str, Any] = load_in_abit
__UpperCamelCase :str = llm_inta_threshold
__UpperCamelCase :List[str] = llm_inta_skip_modules
__UpperCamelCase :Any = llm_inta_enable_fpaa_cpu_offload
__UpperCamelCase :List[Any] = llm_inta_has_fpaa_weight
__UpperCamelCase :str = bnb_abit_quant_type
__UpperCamelCase :Optional[int] = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__UpperCamelCase :Tuple = torch.floataa
elif isinstance(__lowercase , __lowercase):
__UpperCamelCase :Union[str, Any] = getattr(__lowercase , __lowercase)
elif isinstance(__lowercase , torch.dtype):
__UpperCamelCase :int = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
self.post_init()
def UpperCamelCase__ ( self) -> Union[str, Any]:
if not isinstance(self.llm_inta_threshold , __lowercase):
raise ValueError('''llm_int8_threshold must be a float''')
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __lowercase):
raise ValueError('''llm_int8_skip_modules must be a list of strings''')
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __lowercase):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
if not isinstance(self.llm_inta_has_fpaa_weight , __lowercase):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
if not isinstance(self.bnb_abit_quant_type , __lowercase):
raise ValueError('''bnb_4bit_quant_type must be a string''')
if not isinstance(self.bnb_abit_use_double_quant , __lowercase):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
'''0.39.0'''):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')
def UpperCamelCase__ ( self) -> Any:
return self.load_in_abit or self.load_in_abit
def UpperCamelCase__ ( self) -> List[Any]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCamelCase__ ( cls , __lowercase , __lowercase , **__lowercase) -> List[str]:
__UpperCamelCase :Optional[int] = cls(**__lowercase)
__UpperCamelCase :Optional[Any] = []
for key, value in kwargs.items():
if hasattr(__lowercase , __lowercase):
setattr(__lowercase , __lowercase , __lowercase)
to_remove.append(__lowercase)
for key in to_remove:
kwargs.pop(__lowercase , __lowercase)
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
with open(__lowercase , '''w''' , encoding='''utf-8''') as writer:
__UpperCamelCase :Optional[int] = self.to_dict()
__UpperCamelCase :Optional[int] = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase) + '''\n'''
writer.write(__lowercase)
def UpperCamelCase__ ( self) -> Dict[str, Any]:
__UpperCamelCase :Optional[Any] = copy.deepcopy(self.__dict__)
__UpperCamelCase :Optional[int] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
return output
def __repr__( self) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def UpperCamelCase__ ( self , __lowercase = True) -> str:
if use_diff is True:
__UpperCamelCase :Union[str, Any] = self.to_diff_dict()
else:
__UpperCamelCase :Dict = self.to_dict()
return json.dumps(__lowercase , indent=2 , sort_keys=__lowercase) + "\n"
def UpperCamelCase__ ( self) -> Dict[str, Any]:
__UpperCamelCase :Union[str, Any] = self.to_dict()
# get the default config dict
__UpperCamelCase :Optional[Any] = BitsAndBytesConfig().to_dict()
__UpperCamelCase :str = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__UpperCamelCase :str = value
return serializable_config_dict
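# Editor's usage sketch (hedged): pairing this config with `from_pretrained`; the model id is
# illustrative, the keyword names follow the public API (load_in_4bit, bnb_4bit_quant_type).
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)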
| 105 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase : int = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Any):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
| 13 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase : Union[str, Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = create_model(
"HTSAT-tiny" , "roberta" , _UpperCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_UpperCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = {}
SCREAMING_SNAKE_CASE_: Tuple = R".*sequential.(\d+).*"
SCREAMING_SNAKE_CASE_: Dict = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE_: Any = key.replace(_UpperCAmelCase , _UpperCAmelCase )
if re.match(_UpperCAmelCase , _UpperCAmelCase ):
# replace sequential layers with list
SCREAMING_SNAKE_CASE_: Optional[int] = re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 )
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(_UpperCAmelCase )//3}.linear." )
elif re.match(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = int(re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE_: Optional[int] = 1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE_: Tuple = value
SCREAMING_SNAKE_CASE_: List[str] = mixed_qkv.size(0 ) // 3
SCREAMING_SNAKE_CASE_: Any = mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE_: Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] = mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE_: str = query_layer
SCREAMING_SNAKE_CASE_: int = key_layer
SCREAMING_SNAKE_CASE_: List[Any] = value_layer
else:
SCREAMING_SNAKE_CASE_: int = value
return model_state_dict
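# Editor's example (hedged, key is illustrative): how the renaming above rewrites one
# checkpoint key.
#
#   "text_branch.sequential.3.weight"
#       -> "text_model.layers.1.linear.weight"   # "text_branch" -> "text_model", 3 // 3 == 1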
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = init_clap(_UpperCAmelCase , enable_fusion=_UpperCAmelCase )
clap_model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = clap_model.state_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = rename_state_dict(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = ClapConfig()
SCREAMING_SNAKE_CASE_: Tuple = enable_fusion
SCREAMING_SNAKE_CASE_: Tuple = ClapModel(_UpperCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
transformers_config.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
class __UpperCamelCase :
def __init__( self :Dict ,_UpperCamelCase :list[list[int]] ):
snake_case_ : Optional[int] = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(a_ ) != 0:
snake_case_ : Any = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(a_ ) != cols:
raise error
for value in row:
if not isinstance(a_ ,(int, float) ):
raise error
snake_case_ : Optional[Any] = rows
else:
snake_case_ : Tuple = []
def a__ ( self :List[Any] ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def a__ ( self :Any ):
return len(self.rows )
@property
def a__ ( self :List[str] ):
return len(self.rows[0] )
@property
def a__ ( self :List[str] ):
return (self.num_rows, self.num_columns)
@property
def a__ ( self :str ):
return self.order[0] == self.order[1]
def a__ ( self :Tuple ):
snake_case_ : Union[str, Any] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(a_ )
def a__ ( self :List[str] ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def a__ ( self :List[Any] ):
return bool(self.determinant() )
def a__ ( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ):
snake_case_ : List[str] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(a_ ).determinant()
def a__ ( self :int ,_UpperCamelCase :int ,_UpperCamelCase :int ):
if (row + column) % 2 == 0:
return self.get_minor(a_ ,a_ )
return -1 * self.get_minor(a_ ,a_ )
def a__ ( self :Dict ):
return Matrix(
[
[self.get_minor(a_ ,a_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def a__ ( self :Tuple ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def a__ ( self :Union[str, Any] ):
snake_case_ : str = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(a_ )
def a__ ( self :Union[str, Any] ):
snake_case_ : Tuple = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self :List[str] ):
return str(self.rows )
def __str__( self :int ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(a_ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def a__ ( self :int ,_UpperCamelCase :list[int] ,_UpperCamelCase :int | None = None ):
snake_case_ : Union[str, Any] = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(a_ ,a_ ):
raise type_error
for value in row:
if not isinstance(a_ ,(int, float) ):
raise type_error
if len(a_ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(a_ )
else:
snake_case_ : str = self.rows[0:position] + [row] + self.rows[position:]
def a__ ( self :str ,_UpperCamelCase :list[int] ,_UpperCamelCase :int | None = None ):
snake_case_ : Union[str, Any] = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(a_ ,a_ ):
raise type_error
for value in column:
if not isinstance(a_ ,(int, float) ):
raise type_error
if len(a_ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
snake_case_ : int = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
snake_case_ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self :int ,_UpperCamelCase :object ):
if not isinstance(a_ ,a_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self :int ,_UpperCamelCase :object ):
return not self == other
def __neg__( self :int ):
return self * -1
def __add__( self :Optional[Any] ,_UpperCamelCase :Matrix ):
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self :Any ,_UpperCamelCase :Matrix ):
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self :Tuple ,_UpperCamelCase :Matrix | int | float ):
if isinstance(a_ ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a_ ,a_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(a_ ,a_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self :int ,_UpperCamelCase :int ):
if not isinstance(a_ ,a_ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
snake_case_ : Optional[Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def a__ ( cls :str ,_UpperCamelCase :list[int] ,_UpperCamelCase :list[int] ):
return sum(row[i] * column[i] for i in range(len(a_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
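# Editor's demo (hedged): intended behaviour of the class above, written against the original
# method names (determinant, inverse, ...) rather than the placeholder ones.
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # -2 == 1 * 4 - 2 * 3
#   (m * 2).rows      # [[2, 4], [6, 8]]
#   m + m == m * 2    # True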
| 358 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Any = tmp_path / """file.csv"""
snake_case_ : Any = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[int] = tmp_path / """malformed_file.csv"""
snake_case_ : int = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : str = tmp_path / """csv_with_image.csv"""
snake_case_ : int = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : int = tmp_path / """csv_with_label.csv"""
snake_case_ : Tuple = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv"""
snake_case_ : str = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ):
'''simple docstring'''
snake_case_ : int = Csv()
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(lowerCamelCase_ ) in record.message
for record in caplog.records )
@require_pil
def UpperCAmelCase ( lowerCamelCase_ :Tuple ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : Tuple = f.read().splitlines()[1]
snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] )
snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
snake_case_ : List[str] = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : List[Any] = f.read().splitlines()[1:]
snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
    snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
snake_case_ : Dict = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] | 8 | 0 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
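# Editor's example: " ".join("hello world".split()[::-1]) -> "world hello"; split() also
# collapses repeated whitespace, so "a  b" likewise becomes "b a".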
| 158 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
    __magic_name__ : List[Any] = filter(lambda p : p.requires_grad , model.parameters() )
__magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
"""simple docstring"""
if metric == "rouge2":
__magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
__magic_name__ : int = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
            f'seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'
' function.' )
__magic_name__ : List[Any] = ModelCheckpoint(
dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return EarlyStopping(
monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , )
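# Editor's sketch (hedged): how these helpers are typically wired into a Trainer; the helper
# and callback names below are the intended ones, and the hyperparameters are illustrative.
#
#   checkpoint_cb = get_checkpoint_callback(args.output_dir, metric="rouge2")
#   early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])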
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int:
__magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
__magic_name__ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__magic_name__ : List[Any] = od / 'test_results.txt'
__magic_name__ : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
__magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__magic_name__ : Optional[Any] = metrics[key]
if isinstance(_A , torch.Tensor ):
__magic_name__ : Tuple = val.item()
__magic_name__ : int = F'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__magic_name__ : str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
try:
__magic_name__ : str = pl_module.model.model.num_parameters()
except AttributeError:
__magic_name__ : List[str] = pl_module.model.num_parameters()
__magic_name__ : List[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 331 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase=[2, 2, 3, 2] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1_0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=["stage2", "stage3", "stage4"] , __UpperCAmelCase=3 , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :str = parent
lowerCAmelCase__ :List[Any] = batch_size
lowerCAmelCase__ :Union[str, Any] = image_size
lowerCAmelCase__ :List[Any] = num_channels
lowerCAmelCase__ :int = num_stages
lowerCAmelCase__ :Optional[int] = hidden_sizes
lowerCAmelCase__ :Any = depths
lowerCAmelCase__ :Optional[int] = is_training
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :List[str] = intermediate_size
lowerCAmelCase__ :str = hidden_act
lowerCAmelCase__ :Optional[int] = type_sequence_label_size
lowerCAmelCase__ :List[Any] = initializer_range
lowerCAmelCase__ :Tuple = out_features
lowerCAmelCase__ :int = num_labels
lowerCAmelCase__ :Optional[Any] = scope
lowerCAmelCase__ :int = num_stages
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ :Dict = None
if self.use_labels:
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def snake_case ( self ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=__UpperCAmelCase , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = UperNetForSemanticSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[str] = config_and_inputs
lowerCAmelCase__ :Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__magic_name__ :List[str] = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
__magic_name__ :Optional[Any] = False
__magic_name__ :Optional[int] = False
__magic_name__ :List[Any] = False
__magic_name__ :List[Any] = False
__magic_name__ :Union[str, Any] = False
__magic_name__ :Optional[int] = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = UperNetModelTester(self )
lowerCAmelCase__ :Union[str, Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
'''simple docstring'''
return
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :int = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ :int = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ :str = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ :Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ :List[str] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :Any = _config_zero_init(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCAmelCase__ :Union[str, Any] = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def snake_case ( self ):
'''simple docstring'''
pass
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :Any = UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __A () ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
lowerCAmelCase__ :Optional[Any] = Image.open(_SCREAMING_SNAKE_CASE ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
lowerCAmelCase__ :List[Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = prepare_img()
lowerCAmelCase__ :Tuple = processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ :str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
lowerCAmelCase__ :Any = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = prepare_img()
lowerCAmelCase__ :List[str] = processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Union[str, Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ :str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 254 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :list[list[int]] = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase__ :str = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
__A = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 254 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self : Optional[int] , _A : int , _A : int , _A : int , _A : float , _A : int , _A : int , _A : int , _A : int , _A : str , _A : bool = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase : List[str] = nn.Embedding(_A , _A )
lowercase : str = nn.Embedding(_A , _A )
lowercase : int = False
lowercase : int = nn.Dropout(p=_A )
lowercase : List[str] = TaConfig(
vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
lowercase : Optional[Any] = nn.ModuleList()
for lyr_num in range(_A ):
lowercase : Union[str, Any] = TaBlock(_A )
self.encoders.append(_A )
lowercase : Union[str, Any] = TaLayerNorm(_A )
lowercase : Optional[Any] = nn.Dropout(p=_A )
def __a ( self : Any , _A : Dict , _A : Dict ) -> str:
"""simple docstring"""
lowercase : List[str] = self.token_embedder(_A )
lowercase : Dict = encoder_input_tokens.shape[1]
lowercase : Dict = torch.arange(_A , device=encoder_input_tokens.device )
x += self.position_encoding(_A )
lowercase : int = self.dropout_pre(_A )
        # invert the attention mask
lowercase : Optional[Any] = encoder_input_tokens.size()
lowercase : Union[str, Any] = self.get_extended_attention_mask(_A , _A )
for lyr in self.encoders:
lowercase : Union[str, Any] = lyr(_A , _A )[0]
lowercase : Optional[Any] = self.layer_norm(_A )
return self.dropout_post(_A ), encoder_inputs_mask | 308 |
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
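# Editor's usage sketch (hedged): the intended call shape of the grid search above (here named
# dijkstra), with 1 marking walkable cells; the exact tie-breaking of the returned path may vary.
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   dist, path = dijkstra(grid, source=(0, 0), destination=(2, 2), allow_diagonal=False)
#   # dist == 4.0; path is one shortest route, e.g. [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]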
if __name__ == "__main__":
import doctest
doctest.testmod() | 308 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
A__ = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
A__ = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowercase_ )
DownloadCommand.register_subcommand(lowercase_ )
EnvironmentCommand.register_subcommand(lowercase_ )
RunCommand.register_subcommand(lowercase_ )
ServeCommand.register_subcommand(lowercase_ )
UserCommands.register_subcommand(lowercase_ )
AddNewModelCommand.register_subcommand(lowercase_ )
AddNewModelLikeCommand.register_subcommand(lowercase_ )
LfsCommands.register_subcommand(lowercase_ )
PTtoTFCommand.register_subcommand(lowercase_ )
# Let's go
A__ = parser.parse_args()
if not hasattr(lowercase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
A__ = args.func(lowercase_ )
service.run()
if __name__ == "__main__":
main()
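# Editor's note (hedged): this module backs the `transformers-cli` console entry point;
# typical shell invocations (subcommands registered above, arguments illustrative):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased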
| 355 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
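# Editor's note (hedged): with the _LazyModule pattern above, importing the package stays
# cheap; the torch-backed module is only loaded on first attribute access, e.g.
#
#   from transformers.models.nezha import NezhaModel   # modeling_nezha is imported here, lazily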
| 231 | 0 |
from bisect import bisect
from itertools import accumulate
def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Dict ) -> str:
    __snake_case = sorted(zip(snake_case_ , snake_case_ ) , key=lambda x : x[0] / x[1] , reverse=snake_case_ )
__snake_case , __snake_case = [i[0] for i in r], [i[1] for i in r]
__snake_case = list(accumulate(snake_case_ ) )
__snake_case = bisect(snake_case_ , snake_case_ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
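A quick check of the greedy rule above with the textbook instance (values 60/100/120, weights 10/20/30, capacity 50): the two best-ratio items are taken whole and two thirds of the last one tops up the knapsack.

values = [60, 100, 120]
weights = [10, 20, 30]
print(frac_knapsack(values, weights, w=50, n=3))  # 240.0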
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_euler' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_euler' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=A_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=A_ , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 222 | 0 |
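Outside the test harness, the same pipeline is driven as below. This is a sketch assuming a CUDA machine with the checkpoint available; the arguments mirror the k-diffusion test above.

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base').to('cuda')
pipe.set_scheduler('sample_dpmpp_2m')
generator = torch.manual_seed(0)
output = pipe(
    ['A painting of a squirrel eating a burger'],
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=15,
    output_type='np',
    use_karras_sigmas=True,
)
print(output.images[0].shape)  # (512, 512, 3)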
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : Optional[str] = None , **UpperCAmelCase__ : Dict ) -> List[str]:
lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase = Path(UpperCAmelCase__ ).joinpath(UpperCAmelCase__ )
try:
shutil.copyfile(UpperCAmelCase__ , UpperCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase = self.model_save_dir.joinpath(UpperCAmelCase__ )
if src_path.exists():
lowerCAmelCase = Path(UpperCAmelCase__ ).joinpath(UpperCAmelCase__ )
try:
shutil.copyfile(UpperCAmelCase__ , UpperCAmelCase__ )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Optional[Any] , ) -> Any:
if os.path.isfile(UpperCAmelCase__ ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# saving model weights/files
self._save_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def __UpperCAmelCase ( cls : List[str] , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : Optional[Union[bool, str, None]] = None , UpperCAmelCase__ : Optional[Union[str, None]] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional["ort.SessionOptions"] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> List[Any]:
lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCAmelCase__ ):
lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , provider=UpperCAmelCase__ , sess_options=UpperCAmelCase__ )
lowerCAmelCase = Path(UpperCAmelCase__ )
# load model from hub
else:
# download model
lowerCAmelCase = hf_hub_download(
repo_id=UpperCAmelCase__ , filename=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , revision=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , )
lowerCAmelCase = Path(UpperCAmelCase__ ).parent
lowerCAmelCase = Path(UpperCAmelCase__ ).name
lowerCAmelCase = OnnxRuntimeModel.load_model(UpperCAmelCase__ , provider=UpperCAmelCase__ , sess_options=UpperCAmelCase__ )
return cls(model=UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def __UpperCAmelCase ( cls : List[str] , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , **UpperCAmelCase__ : Any , ) -> Tuple:
lowerCAmelCase = None
if len(str(UpperCAmelCase__ ).split('@' ) ) == 2:
lowerCAmelCase , lowerCAmelCase = model_id.split('@' )
return cls._from_pretrained(
model_id=UpperCAmelCase__ , revision=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 55 |
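Stripped of the wrapper, the core of `OnnxRuntimeModel` is plain onnxruntime: build an `InferenceSession` and feed it numpy arrays. A minimal sketch ('model.onnx' and the dummy input shape are placeholders):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
feed = {session.get_inputs()[0].name: np.zeros((1, 3, 224, 224), dtype=np.float32)}
outputs = session.run(None, feed)
print([output.shape for output in outputs])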
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=9_9 , UpperCAmelCase__ : Any=3_6 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Dict=5_1_2 , UpperCAmelCase__ : Optional[Any]=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : List[str]=6 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=1_0_0_0 , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = text_seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = coordinate_size
lowerCAmelCase = shape_size
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase = text_seq_length
lowerCAmelCase = (image_size // patch_size) ** 2 + 1
lowerCAmelCase = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> str:
lowerCAmelCase = LayoutLMvaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# text + image
lowerCAmelCase = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Tuple ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
lowerCamelCase : int = False
lowerCamelCase : Optional[int] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=False ) -> Optional[int]:
lowerCAmelCase = copy.deepcopy(UpperCAmelCase__ )
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
return inputs_dict
def __UpperCAmelCase ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Any ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = LayoutLMvaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ):
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : int ) -> str:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(UpperCAmelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values.to(UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([[1, 2]] )
lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase = model(
input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , )
# verify the logits
lowerCAmelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 55 | 1 |
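The integration test at the end corresponds to this inference recipe; note that the `va` suffixes in the dump stand for `v3` in the published class names:

import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

model = LayoutLMv3Model.from_pretrained('microsoft/layoutlmv3-base')
processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.open('tests/fixtures/tests_samples/COCO/000000039769.png')
pixel_values = processor(images=image, return_tensors='pt').pixel_values
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])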
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
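Under the hood, `accelerate test` shells out to `accelerate-launch` with the bundled smoke-test script; a rough manual equivalent (both paths are illustrative, the command resolves the real script path itself):

import os
from accelerate.test_utils import execute_subprocess_async

cmd = ["accelerate-launch", "--config_file=default_config.yaml", "test_utils/scripts/test_script.py"]
result = execute_subprocess_async(cmd, env=os.environ.copy())
print(result.returncode)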
| 261 |
"""simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
| 261 | 1 |
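Against the upstream `TrialShortNamer` (where the methods collapsed to `a__` above are named `shortname` and `parse_repr`), usage looks like this; the subclass and hyperparameter values are made up:

class RunNamer(TrialShortNamer):
    PREFIX = 'run'
    DEFAULTS = {'learning_rate': 1e-3, 'batch_size': 32}

name = RunNamer.shortname({'learning_rate': 3e-4, 'batch_size': 32})
print(name)                       # 'run_lr0.0003' (values equal to a default are omitted)
print(RunNamer.parse_repr(name))  # {'learning_rate': 0.0003, 'batch_size': 32}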
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class UpperCamelCase__( _lowerCamelCase ):
lowerCAmelCase__ : str = 'conditional_detr'
lowerCAmelCase__ : int = ['past_key_values']
lowerCAmelCase__ : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=3 ,__UpperCAmelCase=3_00 ,__UpperCAmelCase=6 ,__UpperCAmelCase=20_48 ,__UpperCAmelCase=8 ,__UpperCAmelCase=6 ,__UpperCAmelCase=20_48 ,__UpperCAmelCase=8 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=True ,__UpperCAmelCase="relu" ,__UpperCAmelCase=2_56 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1.0 ,__UpperCAmelCase=False ,__UpperCAmelCase="sine" ,__UpperCAmelCase="resnet50" ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=2 ,__UpperCAmelCase=5 ,__UpperCAmelCase=2 ,__UpperCAmelCase=1 ,__UpperCAmelCase=1 ,__UpperCAmelCase=2 ,__UpperCAmelCase=5 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.2_5 ,**__UpperCAmelCase ,) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A__ = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = backbone_config.get('model_type' )
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(__UpperCAmelCase )
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = cls_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = focal_alpha
super().__init__(is_encoder_decoder=__UpperCAmelCase ,**__UpperCAmelCase )
@property
def snake_case__ ( self ) -> List[str]:
return self.encoder_attention_heads
@property
def snake_case__ ( self ) -> Optional[Any]:
return self.d_model
def snake_case__ ( self ) -> List[str]:
A__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
class UpperCamelCase__( _lowerCamelCase ):
lowerCAmelCase__ : int = version.parse('1.11' )
@property
def snake_case__ ( self ) -> Optional[int]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def snake_case__ ( self ) -> Any:
return 1e-5
@property
def snake_case__ ( self ) -> int:
return 12
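For reference, the published class is `ConditionalDetrConfig`; the `attribute_map` above is what lets the generic names resolve to the DETR-specific ones:

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(num_queries=100, d_model=256)
print(config.hidden_size == config.d_model)                          # True
print(config.num_attention_heads == config.encoder_attention_heads)  # True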
| 353 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__( __A , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Dict = BloomTokenizerFast
lowerCAmelCase__ : Union[str, Any] = BloomTokenizerFast
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : int = 'tokenizer_file'
lowerCAmelCase__ : Dict = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def snake_case__ ( self ) -> Optional[int]:
super().setUp()
A__ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ,**__UpperCAmelCase ) -> int:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def snake_case__ ( self ) -> Tuple:
A__ = self.get_rust_tokenizer()
A__ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
A__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
A__ = tokenizer.batch_encode_plus(__UpperCAmelCase )['input_ids']
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
A__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(__UpperCAmelCase ,max_length=__UpperCAmelCase )
tokenizer_r.encode_plus(__UpperCAmelCase ,max_length=__UpperCAmelCase )
tokenizer_r.batch_encode_plus(__UpperCAmelCase ,max_length=__UpperCAmelCase )
tokenizer_r.encode(__UpperCAmelCase ,max_length=__UpperCAmelCase )
tokenizer_r.batch_encode_plus(__UpperCAmelCase ,max_length=__UpperCAmelCase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(__UpperCAmelCase ,tokenizer_r.encode ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' )
# Simple input
self.assertRaises(__UpperCAmelCase ,tokenizer_r.encode_plus ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' )
# Simple input
self.assertRaises(
__UpperCAmelCase ,tokenizer_r.batch_encode_plus ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' ,)
# Pair input
self.assertRaises(__UpperCAmelCase ,tokenizer_r.encode ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' )
# Pair input
self.assertRaises(__UpperCAmelCase ,tokenizer_r.encode_plus ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' )
# Pair input
self.assertRaises(
__UpperCAmelCase ,tokenizer_r.batch_encode_plus ,__UpperCAmelCase ,max_length=__UpperCAmelCase ,padding='max_length' ,)
def snake_case__ ( self ) -> Tuple:
A__ = self.get_rust_tokenizer()
A__ = load_dataset('xnli' ,'all_languages' ,split='test' ,streaming=__UpperCAmelCase )
A__ = next(iter(__UpperCAmelCase ) )['premise'] # pick up one data
A__ = list(sample_data.values() )
A__ = list(map(tokenizer.encode ,__UpperCAmelCase ) )
A__ = [tokenizer.decode(__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 154 | 0 |
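In isolation, the round trip the first test checks looks like this (the expected ids come from the test above):

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
ids = tokenizer('The quick brown fox</s>')['input_ids']
print(ids)                    # [2175, 23714, 73173, 144252, 2]
print(tokenizer.decode(ids))  # The quick brown fox</s>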
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row NumPy array into a column NumPy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project ``features`` onto the first ``dimensions`` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project ``features`` onto ``dimensions`` discriminant directions."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError('Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 |
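With the placeholder assignments restored as above, a toy run: three features (rows) and five samples (columns) projected down to two principal components.

import numpy as np

features = np.array(
    [[1.0, 2.0, 3.0, 4.0, 5.0],
     [2.0, 4.0, 6.0, 8.0, 10.0],
     [1.0, 1.0, 2.0, 2.0, 3.0]]
)
projected = principal_component_analysis(features, 2)
print(projected.shape)  # (2, 5)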
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = list(snake_case__ )
_snake_case : List[Any] = list(snake_case__ )
_snake_case : List[Any] = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count += 1
_snake_case : Any = """_"""
if count > 1:
return False
else:
return "".join(snake_case__ )
def UpperCAmelCase__ (snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = []
while True:
_snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ )
_snake_case : int = []
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
_snake_case : List[Any] = compare_string(binary[i] , binary[j] )
if k is False:
_snake_case : Dict = """*"""
_snake_case : List[Any] = """*"""
temp.append("""X""" )
for i in range(len(snake_case__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case__ ) == 0:
return pi
_snake_case : Optional[int] = list(set(snake_case__ ) )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ):
"""simple docstring"""
_snake_case : Optional[int] = []
for minterm in minterms:
_snake_case : Any = """"""
for _ in range(snake_case__ ):
_snake_case : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case__ )
return temp
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = list(snake_case__ )
_snake_case : List[str] = list(snake_case__ )
_snake_case : Tuple = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : Any = []
_snake_case : Union[str, Any] = [0] * len(snake_case__ )
for i in range(len(chart[0] ) ):
_snake_case : Tuple = 0
_snake_case : str = -1
for j in range(len(snake_case__ ) ):
if chart[j][i] == 1:
count += 1
_snake_case : Union[str, Any] = j
if count == 1:
_snake_case : Union[str, Any] = 1
for i in range(len(snake_case__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case__ ) ):
_snake_case : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
_snake_case : Optional[int] = 0
_snake_case : str = -1
_snake_case : Any = 0
for i in range(len(snake_case__ ) ):
_snake_case : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_snake_case : Dict = count_n
_snake_case : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case__ ) ):
_snake_case : Optional[Any] = 0
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
for i in range(len(snake_case__ ) ):
_snake_case : Any = prime_implicants[i].count("""_""" )
for j in range(len(snake_case__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
_snake_case : Tuple = 1
return chart
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = int(input("""Enter the no. of variables\n""" ) )
_snake_case : List[str] = [
float(snake_case__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ )
_snake_case : str = check(snake_case__ )
print("""Prime Implicants are:""" )
print(snake_case__ )
_snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ )
_snake_case : str = selection(snake_case__ , snake_case__ )
print("""Essential Prime Implicants are:""" )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 64 | 0 |
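End to end, the module minimises a Boolean function from its minterms. A worked run for f(A, B) = sum of m(0, 1, 3), i.e. A' + B, using the upstream function names (`decimal_to_binary`, `check`, `prime_implicant_chart`, `selection`) that the dump collapsed to `UpperCAmelCase__`, and assuming the placeholder writes inside them are restored to the table updates they stand for:

binary = decimal_to_binary(2, [0, 1, 3])   # ['00', '01', '11']
prime_implicants = check(binary)           # expected: ['0_', '_1']
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))  # both implicants are essential here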
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _lowerCAmelCase:
"""simple docstring"""
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return None
class _lowerCAmelCase:
"""simple docstring"""
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return None
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : str =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _a ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_lowerCamelCase , 'tf' , 1_2 , **_lowerCamelCase )
@require_torch
@slow
def _a ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_lowerCamelCase , 'pt' , 1_2 , **_lowerCamelCase )
@require_torch
@slow
def _a ( self ):
from transformers import BertModel
UpperCamelCase_: Dict = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(_lowerCamelCase ) )
vocab_file.flush()
UpperCamelCase_: List[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase_: Dict = BertModel(BertConfig(vocab_size=len(_lowerCamelCase ) ) )
model.save_pretrained(_lowerCamelCase )
self._test_export(_lowerCamelCase , 'pt' , 1_2 , _lowerCamelCase )
@require_tf
@slow
def _a ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_: Any = self._test_export(_lowerCamelCase , 'tf' , 1_2 , **_lowerCamelCase )
UpperCamelCase_: List[Any] = quantize(Path(_lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_lowerCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _a ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_: Any = self._test_export(_lowerCamelCase , 'pt' , 1_2 , **_lowerCamelCase )
UpperCamelCase_: Optional[int] = quantize(_lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_lowerCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase_: Optional[Any] = Path(_lowerCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
return path
except Exception as e:
self.fail(_lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _a ( self ):
from transformers import BertModel
UpperCamelCase_: Optional[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
UpperCamelCase_: str = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_lowerCamelCase , _lowerCamelCase , 'pt' )
@require_tf
@require_tokenizers
@slow
def _a ( self ):
from transformers import TFBertModel
UpperCamelCase_: Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
UpperCamelCase_: Tuple = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_lowerCamelCase , _lowerCamelCase , 'tf' )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Any = FeatureExtractionPipeline(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[Any] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = infer_shapes(_lowerCamelCase , _lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , _lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , _lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def _a ( self ):
UpperCamelCase_: Dict = ['input_ids', 'attention_mask', 'token_type_ids']
UpperCamelCase_: Tuple = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
UpperCamelCase_ ,UpperCamelCase_: List[str] = ensure_valid_input(FuncContiguousArgs() , _lowerCamelCase , _lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(_lowerCamelCase ) , set(_lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_lowerCamelCase , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase_ ,UpperCamelCase_: Tuple = ensure_valid_input(FuncNonContiguousArgs() , _lowerCamelCase , _lowerCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def _a ( self ):
UpperCamelCase_: Any = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
| 292 |
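A typical invocation of the `convert` helper exercised by `_test_export`; the keyword names follow the positional order used above, and the output path is illustrative:

from pathlib import Path
from transformers.convert_graph_to_onnx import convert

convert(framework='pt', model='bert-base-cased', output=Path('onnx/bert-base-cased.onnx'), opset=12)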
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_funnel'] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_funnel'] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Infinite incremental sieve of Eratosthenes: yields the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its recorded factor at the
            # next multiple that is not already claimed by another prime.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the smallest odd n for which the remainder 2 * p_n * n exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 223 |
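A quick sanity check of the incremental sieve: instead of a big boolean array it keeps, for every known composite, the prime factor used to reschedule it in `factor_map`.

primes = sieve()
print([next(primes) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]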
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """AND is 1 only when neither input is 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 223 | 1 |
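The same tuple-counting idiom covers the other basic gates; for instance a hypothetical `or_gate` in the identical style:

def or_gate(input_1: int, input_2: int) -> int:
    # OR is 0 only when both inputs are 0.
    return int((input_1, input_2).count(1) != 0)


assert or_gate(0, 0) == 0
assert or_gate(0, 1) == 1
assert or_gate(1, 0) == 1
assert or_gate(1, 1) == 1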
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = BlenderbotConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = """gelu"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=20 , A_=2 , A_=1 , A_=0 , )-> List[Any]:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
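# Note on the check above (a descriptive addition): running the decoder once on
# the full sequence and once incrementally with `past_key_values` must produce
# numerically close logits on a random slice; that equivalence is the contract
# the key/value cache has to satisfy for incremental generation to be correct.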
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 370 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 251 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 181 |
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root inside.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
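# A small helper sketch (an illustrative addition; `bisection_steps` is not in
# the original file and assumes b > a and tol > 0): the bracket halves each
# iteration, so the loop above needs about log2((b - a) / tol) passes.
def bisection_steps(a: float, b: float, tol: float = 0.01) -> int:
    from math import ceil, log2

    return max(0, ceil(log2((b - a) / tol)))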
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 235 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
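# A consistency-check sketch (an illustrative addition; `_same_permutations` is
# not in the original file): both implementations must produce the same n!
# permutations, up to ordering.
def _same_permutations(nums: list[int]) -> bool:
    return sorted(permute(list(nums))) == sorted(permute2(list(nums)))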
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 367 | import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # python one-liner segments; `load` must run before socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 78 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """Compute vegetation indices from band matrices (red, green, blue, red edge, NIR)."""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch an index name to the corresponding method and evaluate it."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
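# Usage sketch (an illustrative addition): indices are computed on NumPy arrays
# of matching shape, e.g.
#   bands = IndexCalculation(red=np.array([[50.0]]), nir=np.array([[100.0]]))
#   bands.calculation("NDVI")  # -> array([[0.33333333]])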
| 61 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"
    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 62 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('''UTF-8''')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' '''):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '''\n'''.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('''.zip''') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
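# Parsing-model note (a descriptive addition): pytest prints each warning as an
# unindented header line followed by indented body lines, so `parse_line`
# buffers indented lines and flushes the buffer into `selected_warnings`
# whenever the indentation drops back to column zero.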
if __name__ == "__main__":
    def list_str(values):
        """Split a comma-separated CLI value into a list."""
        return values.split(''',''')
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 366 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''speech'''])
class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''speech'''])
| 278 | 0 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1 and a2*x + b2*y = c2, each given as
    [a, b, c], using Cramer's rule.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        raise ValueError('No solution. (Inconsistent system)')
    if determinant_x == determinant_y == 0:
        # Trivial solution
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
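# A worked example sketch (an illustrative addition): the system
#   x + 2y = 3
#   2x + y = 3
# has determinant 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3 and
# determinant_y = 1*3 - 2*3 = -3, so the unique solution is (1.0, 1.0).
#   assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)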
| 251 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = """mvp"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
| 83 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    # ways[row_length][tile_length - 2] counts the tilings of a row of
    # `row_length` that use at least one tile of that length.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
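# Reading sketch (a descriptive addition): the three columns correspond to
# tiles of length 2, 3 and 4 -- the red, green and blue tiles of Project Euler
# 116 -- and colours are never mixed, so the answer is the sum of the three
# independent counts for a row of the full length.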
if __name__ == "__main__":
print(F"""{solution() = }""")
| 355 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| 312 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
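# Why Euclid's formula is enough (a descriptive addition): every primitive
# Pythagorean triple is (m**2 - n**2, 2*m*n, m**2 + n**2) for coprime m > n of
# opposite parity, giving perimeter 2*m*(m + n); stepping through the multiples
# of that perimeter counts the non-primitive triples as well, and the answer
# keeps only the perimeters hit exactly once.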
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
"""simple docstring"""
import base64
def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))
def base64_decode(encoded_bytes: bytes) -> str:
    """Decode base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_bytes).decode("utf-8")
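# Round-trip property sketch (an illustrative addition): decoding an encoded
# string returns the original, including non-ASCII text.
#   assert base64_decode(base64_encode("héllo")) == "héllo"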
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded) | 243 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
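# Usage sketch (an illustrative addition): pass 0 for the unknown quantity,
#   electric_power(voltage=0, current=2, power=5)  # -> result(name='voltage', value=2.5)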
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []
    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)
    def __repr__(self) -> str:
        return F"""Queue({tuple(self._stack2[::-1] + self._stack1)})"""
    def put(self, item: _T) -> None:
        self._stack1.append(item)
    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
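# Amortized-cost sketch (a descriptive addition): every element is appended and
# popped at most once on each stack, so n put/get operations cost O(n) overall
# even though a single get may move the whole first stack.
#   q = QueueByTwoStacks([1, 2]); q.put(3); q.get()  # -> 1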
if __name__ == "__main__":
from doctest import testmod
testmod()
| 241 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViltImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
| 239 | '''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='''toto''', metadata={'''help''': '''help message'''})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = '''titi'''
    toto = '''toto'''
class MixedTypeEnum(Enum):
    titi = '''titi'''
    toto = '''toto'''
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'''help''': '''help message'''})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='''toto''', metadata={'''help''': '''help message'''})
    foo_str: "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'''help''': '''help message'''})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != """container"""}
            yy = {k: v for k, v in vars(y).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""", None) and yy.get("""choices""", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](expected_choice), yy["""type"""](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""", type=int, required=True)
        expected.add_argument("""--bar""", type=float, required=True)
        expected.add_argument("""--baz""", type=str, required=True)
        expected.add_argument("""--flag""", type=string_to_bool, default=False, const=True, nargs="""?""")
        self.argparsersEqual(parser, expected)
        args = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""", default=42, type=int)
        expected.add_argument("""--baz""", default="""toto""", type=str, help="""help message""")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counterpart
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # the file written above is JSON, so use the JSON parsing helper
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
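# Illustrative sketch (not part of the original test file) of how HfArgumentParser is
# typically driven from a script entry point; `ScriptArguments` is a hypothetical dataclass:
#
#     @dataclass
#     class ScriptArguments:
#         model_name: str = field(metadata={"help": "Model identifier"})
#         seed: int = field(default=42, metadata={"help": "Random seed"})
#
#     parser = HfArgumentParser(ScriptArguments)
#     (script_args,) = parser.parse_args_into_dataclasses()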
| 239 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 296 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
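# Shape sketch (assuming hidden_size=768, i.e. deit-base): timm stores the fused qkv
# projection as a single (3*768, 768) matrix, and the slices above recover
# query = rows [0, 768), key = rows [768, 1536), value = rows [1536, 2304).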
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 296 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines: `frames` is a list of denoised video frames
    as NumPy arrays of shape `(height, width, num_channels)`, or a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
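# Minimal usage sketch (not part of this module); the checkpoint name is an assumption:
#
#     import torch
#     from diffusers import TextToVideoSDPipeline
#
#     pipe = TextToVideoSDPipeline.from_pretrained(
#         "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#     )
#     frames = pipe("an astronaut riding a horse").frames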
| 32 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function for verifying the batch sizes coming from a prepared dataloader in each process
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
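# Note: this script asserts accelerator.num_processes == 2, so it is meant to be run
# under a two-process launch; the exact command below is an assumption, not from this file:
#     accelerate launch --num_processes 2 <this_script>.py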
| 239 | 0 |
def fibonacci(n: int) -> int:
    """Computes the Fibonacci number at index n (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
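# Worked example: the first Fibonacci number with 3 digits is 144, the 12th term,
# so solution(3) == 12.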
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
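# Minimal usage sketch (assumed training function) from a notebook cell:
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build dataloaders, model, optimizer and train with an Accelerator
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)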
| 344 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """
    Find the factorial of a given number num.
    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
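# Note: @lru_cache memoizes every computed value, so once factorial(10) has run,
# factorial(9), factorial(8), ... are all answered from the cache in O(1).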
| 5 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None,
                 num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32,
                 cross_attention_dim: Optional[int] = None, attention_bias: bool = False,
                 sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None,
                 activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
                cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
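# Worked example of the blend above: with mix_ratio = 0.5 the output is
#     0.5 * d0 + 0.5 * d1 + input_states
# where d0 and d1 are the residuals (encoded_state - input_states) produced by the
# two transformers, i.e. an equal-weight mix of both conditionings.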
| 5 | 1 |
import math
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 63 | import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
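# Example: get_pairs(("h", "e", "l", "l", "o"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}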
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__",
                 pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word

                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)

            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 63 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Computes the pressure of the system via the ideal gas law, PV = nRT."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Computes the volume of the system via the ideal gas law, PV = nRT."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
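# Worked example (SI units assumed): 2 mol at 100 K in 5 m^3 gives
#     pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5 ≈ 332.58 Pa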
if __name__ == "__main__":
from doctest import testmod
testmod()
| 298 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BlenderbotSmallTokenizer
A = False
def a_ (self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 298 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 150 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 150 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 167 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 167 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 363 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
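# Example: floats_list((2, 3)) returns a 2-element list of 3 random floats in [0.0, 1.0);
# pass rng=random.Random(0) for a reproducible draw.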
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44_100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 293 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
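    """
    Sum of the factorials of the digits of `number`.

    Doctest examples added for illustration (values hand-checked):
    >>> digit_factorial_sum(145)
    145
    >>> digit_factorial_sum(69)
    363600
    """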
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
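# Sanity check (added): with the defaults above, solution() == 402, the
# published Project Euler 74 answer.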
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""") | 74 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function to lower-case its result and drop underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following isort rules; `key` optionally maps each object to a string."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
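# Illustrative ordering (added): constants, then classes, then functions, with
# case/underscores ignored inside each group, e.g.
#   sort_objects(["load_tf_weights", "BertModel", "BERT_START_DOCSTRING"])
#   -> ["BERT_START_DOCSTRING", "BertModel", "load_tf_weights"]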
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports defined in the `_import_structure` of every `__init__.py` in the repo."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
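    # Example invocations (added; the file path is an assumption about where this
    # script lives in a transformers checkout):
    #   python utils/custom_init_isort.py                # rewrite inits in place
    #   python utils/custom_init_isort.py --check_only   # raise instead of rewriting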
| 205 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Processor that wraps a BLIP image processor and two tokenizers (the language
    model's and the Q-Former's) into a single object.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
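# Minimal usage sketch (added; the checkpoint id is illustrative, not taken from
# this file):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # -> pixel_values + input_ids/attention_mask + qformer_input_ids/qformer_attention_mask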
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
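# EN_CODE/RO_CODE are the eng_Latn / ron_Latn language-code token ids in the
# NLLB-200 vocabulary; the integration tests below rely on them.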
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn", )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt")
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs)

                    p_output = tokenizer_p.encode("Hey this is a <special> token")

                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"])

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn")

        self.assertEqual(
            nested_simplify(inputs), {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            }, )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
| 13 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
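# Lazy-import scaffolding: public names are declared in `_import_structure` and
# only materialized on first attribute access via the `_LazyModule` installed at
# the bottom of this file.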
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 0 |
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    """
    Walks Pell-style recurrences on the convergents of sqrt(2) until the total
    count exceeds `min_total`, then returns the matching blue-disc count; this
    appears to be the standard approach to Project Euler problem 100.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
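# Sanity check (added): with the default min_total=10**12 this yields
# 756872327473, the published Project Euler 100 answer.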
if __name__ == "__main__":
print(F"{solution() = }")
| 361 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
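# Typical invocations once installed (added; the `accelerate` console script
# routes to `main()` above):
#   accelerate config
#   accelerate config --config_file /path/to/default_config.yaml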
| 144 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
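    # Note (added): `pad` below right/bottom-pads each image so that both spatial
    # dimensions become the next multiple of `size`, using mirror ("symmetric")
    # padding — presumably what the window-based Swin2SR model requires.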
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 196 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result
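# Sanity check (added): solution(1000) == 233168, the classic "multiples of 3 or
# 5 below 1000" total. Note the `elif a % 15 == 0` branch can never run, since
# every multiple of 15 already matches the first condition.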
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of `AutoencoderKL.encode`."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    r"""Variational autoencoder (VAE) with a KL-regularized latent space."""

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
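    # Note (added): blend_v/blend_h above linearly cross-fade the overlapping
    # strip between adjacent tiles; tiled_encode/tiled_decode below rely on this
    # to hide seams at tile boundaries.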
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 358 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 29 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer

    pass
| 116 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
    def test_image_processor_from_dict_with_kwargs(self):
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
    def test_batch_feature(self):
pass
@is_flaky()
    def test_call_pil(self):
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
    def test_call_numpy(self):
# Initialize image_processing
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
    def test_call_pytorch(self):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
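# Added usage sketch (illustrative, not part of the original test file): the
# `size` dict asserted above directly determines the processed output shape.
if __name__ == "__main__":
    from PIL import Image as _Image

    _processor = DonutImageProcessor(size={"height": 18, "width": 20})
    _pixel_values = _processor(_Image.new("RGB", (400, 300)), return_tensors="pt").pixel_values
    print(_pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])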
| 41 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
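    # Added sketch: the idea behind `_LazyModule` is to defer the heavy torch
    # imports until an attribute is first accessed. A minimal stand-in could
    # look like this (illustrative only; the real implementation is richer):
    #
    # import importlib, types
    #
    # class TinyLazyModule(types.ModuleType):
    #     def __init__(self, name, import_structure):
    #         super().__init__(name)
    #         # map each exported symbol to the submodule that defines it
    #         self._origin = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    #
    #     def __getattr__(self, symbol):
    #         if symbol not in self._origin:
    #             raise AttributeError(symbol)
    #         module = importlib.import_module("." + self._origin[symbol], self.__name__)
    #         return getattr(module, symbol)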
| 360 | '''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
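def brute_force_count(n: int) -> int:
    # Added cross-check sketch: with x, y, z in arithmetic progression
    # (middle term a, common difference d), n = x**2 - y**2 - z**2 factors as
    # n = a * (4*d - a), so solutions can be counted over the divisors of n.
    count = 0
    for a in range(1, n + 1):
        if n % a:
            continue
        four_d = n // a + a  # from n = a * (4*d - a)
        if four_d % 4 == 0 and 1 <= four_d // 4 < a:  # d integral, 0 < d < a
            count += 1
    return count


# Per the Project Euler 135 statement, n = 1155 is the least value with
# exactly ten solutions; the sieve in solution() must agree with this check.
assert brute_force_count(1155) == 10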
if __name__ == "__main__":
print(f"{solution() = }")
| 106 | 0 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """simple docstring"""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """simple docstring"""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 223 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 223 | 1 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    '''simple docstring'''
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    '''simple docstring'''
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
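def naive_a(n: int) -> int:
    # Added cross-check sketch: direct construction of the sequence
    # a(1) = 1, a(i+1) = a(i) + digitsum(a(i)). The memoized solver above
    # should agree with this for small n, e.g. solution(6) == naive_a(6) == 23.
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a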
if __name__ == "__main__":
print(F"{solution() = }")
| 202 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
    def test_forward_signature(self):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
    def test_model(self):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    def test_for_pretraining(self):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
    def test_keyword_and_dict_args(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = copy.deepcopy(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = outputs_dict[0].numpy()
UpperCAmelCase : Dict = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
    def test_numpy_arrays_inputs(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = v.numpy()
else:
UpperCAmelCase : str = np.array(_SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = prepare_numpy_arrays(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        '''simple docstring'''
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_SCREAMING_SNAKE_CASE )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_SCREAMING_SNAKE_CASE , """_keras_serializable""" , _SCREAMING_SNAKE_CASE )
}
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : List[Any] = main_layer_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : str = tf.keras.Model(_SCREAMING_SNAKE_CASE , outputs=main_layer(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE , """keras_model.h5""" )
model.save(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = tf.keras.models.load_model(
_SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_SCREAMING_SNAKE_CASE , tf.keras.Model )
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
    def test_save_load(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = outputs.last_hidden_state.numpy()
UpperCAmelCase : int = 0
else:
UpperCAmelCase : Optional[Any] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Tuple = after_outputs["""last_hidden_state"""].numpy()
UpperCAmelCase : Optional[int] = 0
else:
UpperCAmelCase : str = after_outputs["""logits"""].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
    def test_save_load_config(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : List[Any] = model_class.from_config(model.config )
UpperCAmelCase : List[Any] = new_model(_SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Union[str, Any] = new_model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_determinism(self):
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence(self):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
UpperCAmelCase : List[str] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
    def test_inference(self):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Optional[int] = prepare_img()
UpperCAmelCase : int = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : int = ViTMAEConfig()
UpperCAmelCase : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
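        # Added note (sketch): passing an explicit `noise` array pins ViTMAE's
        # random masking, which is what makes the comparison above exact, e.g.:
        #
        #   noise = np.random.uniform(size=(1, num_patches))
        #   out_a = model(**inputs, noise=noise)
        #   out_b = model(**inputs, noise=noise)
        #   # out_a.logits and out_b.logits now match elementwise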
| 109 | '''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
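def plain_accuracy(predictions, references):
    # Added sketch: the same computation as _compute above, without the
    # datasets plumbing; is_equiv canonicalizes LaTeX before comparing.
    n_correct = sum(1.0 for i, j in zip(predictions, references) if math_equivalence.is_equiv(i, j))
    return n_correct / len(predictions)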
| 1 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : int = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''')
        else:
            train_extension = self.train_file.split('''.''')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
'''simple docstring'''
lowercase_ :Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ :List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ :str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase_ :int = training_args.get_process_log_level()
logger.setLevel(_a )
datasets.utils.logging.set_verbosity(_a )
transformers.utils.logging.set_verbosity(_a )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowercase_ :int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ :Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase_ :Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase_ :int = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase_ :Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowercase_ :List[Any] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase_ :List[Any] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowercase_ :Any = load_dataset('''csv''' , data_files=_a , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase_ :List[Any] = load_dataset('''json''' , data_files=_a , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase_ :Any = raw_datasets['''train'''].features['''label'''].names
lowercase_ :int = len(_a )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ :Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowercase_ :Optional[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_a , )
lowercase_ :int = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase_ :str = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase_ :int = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase_ :List[Any] = {'''Refused''': 0, '''Entailed''': 1}
lowercase_ :Optional[int] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowercase_ :List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_a ):
# Tokenize the texts
def _convert_table_text_to_pandas(_a ):
lowercase_ :str = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowercase_ :List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowercase_ :Tuple = examples['''statement''']
lowercase_ :List[Any] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowercase_ :Any = tokenizer(_a , _a , padding=_a , max_length=_a , truncation=_a )
lowercase_ :Optional[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowercase_ :Union[str, Any] = raw_datasets.map(
_a , batched=_a , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowercase_ :Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowercase_ :Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowercase_ :Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowercase_ :Optional[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowercase_ :Optional[int] = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowercase_ :Dict = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_a ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_a ):
lowercase_ :str = p.predictions[0] if isinstance(p.predictions , _a ) else p.predictions
lowercase_ :Optional[Any] = np.argmax(_a , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase_ :str = default_data_collator
elif training_args.fpaa:
lowercase_ :Union[str, Any] = DataCollatorWithPadding(_a , pad_to_multiple_of=8 )
else:
lowercase_ :Dict = None
# Initialize our Trainer
lowercase_ :Union[str, Any] = Trainer(
model=_a , args=_a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_a , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
lowercase_ :int = None
if training_args.resume_from_checkpoint is not None:
lowercase_ :Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ :Optional[Any] = last_checkpoint
lowercase_ :Tuple = trainer.train(resume_from_checkpoint=_a )
lowercase_ :Dict = train_result.metrics
lowercase_ :int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_a )
)
lowercase_ :Union[str, Any] = min(_a , len(_a ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _a )
trainer.save_metrics('''train''' , _a )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase_ :Dict = trainer.evaluate(eval_dataset=_a )
lowercase_ :List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_a )
lowercase_ :Optional[Any] = min(_a , len(_a ) )
trainer.log_metrics('''eval''' , _a )
trainer.save_metrics('''eval''' , _a )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowercase_ :Tuple = predict_dataset.remove_columns('''label''' )
lowercase_ :Union[str, Any] = trainer.predict(_a , metric_key_prefix='''predict''' ).predictions
lowercase_ :Dict = np.argmax(_a , axis=1 )
lowercase_ :str = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(_a , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_a ):
lowercase_ :str = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowercase_ :List[Any] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**_a )
else:
trainer.create_model_card(**_a )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
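# Example invocation (added sketch; the script name, checkpoint and
# hyperparameters are illustrative, not prescribed by this file):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --max_seq_length 1024 \
#       --per_device_train_batch_size 8 \
#       --output_dir ./tapex-tabfact
#
# Every flag maps to a dataclass field above or to a standard TrainingArguments field.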
| 252 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    '''simple docstring'''
    return getitem, k


def _set(k, v):
    '''simple docstring'''
    return setitem, k, v


def _del(k):
    '''simple docstring'''
    return delitem, k


def _run_operation(obj, fun, *args):
    '''simple docstring'''
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
SCREAMING_SNAKE_CASE : List[Any] = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
SCREAMING_SNAKE_CASE : Tuple = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
SCREAMING_SNAKE_CASE : Any = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
SCREAMING_SNAKE_CASE : Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    '''simple docstring'''
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api() -> None:
    '''simple docstring'''

    def is_public(name: str) -> bool:
        return not name.startswith('''_''')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
| 252 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
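def example() -> None:
    # Added sketch (hypothetical tree): the classic [0, 3, 0] case needs
    # three moves: two coins leave the left child, and one of them then
    # continues from the root to the right child.
    root = TreeNode(0, TreeNode(3), TreeNode(0))
    assert distribute_coins(root) == 3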
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def block_out_channels_a(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 1_00
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs(self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
        hint_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint_image ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=1_00 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image , expected_image )
| 153 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
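
# Usage sketch (assumption: this file is the ibert package's `__init__.py`).
# With the lazy module installed in `sys.modules`, an import such as
# `from transformers.models.ibert import IBertConfig` only triggers the heavy
# submodule import on first attribute access, keeping package import cheap.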
| 361 |
def solution( n : int = 1000 ) -> int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
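    # Hand-checked sketch (not in the original): for n = 10 the terms are
    # 6, 8, 20, 24, 42, 48, 72, 80, which sum to 300.
    assert solution(10) == 300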
| 221 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num: int , den: int ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len: int ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'{num}/{den}' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( n: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
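    # Quick check (not in the original): the four non-trivial digit-cancelling
    # fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces to
    # 1/100, so solution() prints 100.
    assert fraction_list(2) == ['16/64', '19/95', '26/65', '49/98']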
| 71 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 102 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path: str ) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem( fs: fsspec.AbstractFileSystem ) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs: fsspec.AbstractFileSystem , src: str , dst: str ) -> None:
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
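
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): strip the
    # protocol prefix from a URI before handing the path to a filesystem.
    assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"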
| 285 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
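
if __name__ == "__main__":
    # Usage sketch (not in the original module): downloading the real
    # `xlnet-base-cased` sentencepiece model requires network access and the
    # `sentencepiece` package.
    tokenizer = __snake_case.from_pretrained("xlnet-base-cased")
    print(tokenizer.tokenize("Hello, world!"))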
| 285 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''', [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=13_37, num_examples=42, dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=13_37, num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
], )
def test_split_dict_to_yaml_list(split_dict) -> None:
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name(split_info) -> None:
    """simple docstring"""
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 194 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"{prefix}.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]

    for w in ["weight", "bias"]:
        compressed_sd[f"{prefix}.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.query.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.key.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.value.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.intermediate.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["cls.predictions.decoder.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["cls.predictions.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"cls.predictions.transform.dense.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"cls.predictions.transform.LayerNorm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
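
    # Example invocation (hypothetical script name; the flags mirror the
    # argparse defaults defined above):
    #   python extract.py --model_type bert --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform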
| 194 | 1 |
'''simple docstring'''
def binary_insertion_sort(collection: list ) -> list:
    n = len(collection )
    for i in range(1, n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 357 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class __UpperCAmelCase ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
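
if __name__ == "__main__":
    # Usage sketch (not in the original module): downloading the real
    # `allegro/herbert-base-cased` tokenizer files requires network access.
    tokenizer = __UpperCAmelCase.from_pretrained("allegro/herbert-base-cased")
    print(tokenizer.tokenize("Litwo! Ojczyzno moja!"))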
| 18 | 0 |
from typing import Any
class Node:
    def __init__(self , data: Any ) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self ) -> None:
        self.head = None

    def print_list(self ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()

    def push(self , new_data: Any ) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self , node_data_1: Any , node_data_2: Any ) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 87 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self ):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch
        # version makes an effort to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 87 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , **UpperCAmelCase , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=UpperCAmelCase , strides=UpperCAmelCase , padding="VALID" , groups=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.convolution(self.padding(UpperCAmelCase ) )
lowercase_ = self.normalization(UpperCAmelCase )
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def A__ ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = shape_list(UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(UpperCAmelCase )
return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=1 , strides=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(UpperCAmelCase ) , training=UpperCAmelCase )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
lowercase_ = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = self.pooler(UpperCAmelCase )
for layer_module in self.attention:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = hidden_state * pooled
return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.2" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.3" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , name="layers.0" ),
*[layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase , name=F'stages.{i+1}' ) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(UpperCAmelCase )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
    config_class = RegNetConfig
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(UpperCAmelCase , name="embedder" )
lowercase_ = TFRegNetEncoder(UpperCAmelCase , name="encoder" )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
@unpack_inputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(UpperCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
SCREAMING_SNAKE_CASE__ = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , snake_case_ , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , snake_case_ , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](UpperCAmelCase )
lowercase_ = self.classifier[1](UpperCAmelCase )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase , logits=UpperCAmelCase )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
| 297 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput ( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor


class TransformerTemporalModel ( ModelMixin , ConfigMixin ):
    """simple docstring"""

    @register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )

        self.proj_out = nn.Linear(inner_dim , in_channels )

    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )

        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )

        hidden_states = self.proj_in(hidden_states )

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )

        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output )
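
if __name__ == "__main__":
    # Minimal shape-check sketch (not part of the original module): push a
    # random two-frame batch through a tiny temporal transformer; the layer
    # sizes are arbitrary small values chosen only for this demo.
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=4, norm_num_groups=2)
    sample = torch.randn(2, 4, 8, 8)  # (batch * num_frames, channels, height, width)
    print(model(sample, num_frames=2).sample.shape)  # torch.Size([2, 4, 8, 8])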
| 297 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
    def create_and_check_model(self , config , pixel_values , labels ):
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone(self , config , pixel_values , labels ):
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ) -> None:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ) -> None:
        """simple docstring"""
        return
    @unittest.skip(reason='Bit does not output attentions' )
    def test_attention_outputs( self ) -> None:
        """simple docstring"""
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> None:
        """simple docstring"""
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def test_model_common_attributes( self ) -> None:
        """simple docstring"""
        pass
    def test_forward_signature( self ) -> None:
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ) -> None:
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                # nn.BatchNorm2d (the garbled "BatchNormad" is not a real torch attribute)
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output( self ) -> None:
        """simple docstring"""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def test_feed_forward_chunking( self ) -> None:
        """simple docstring"""
        pass

    def test_for_image_classification( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ) -> None:
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
# NOTE: BackboneTesterMixin is assumed to be imported from ...test_backbone_common earlier in this file.
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = BitModelTester(self )
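

# --- Illustrative sketch, not part of the original test file: a minimal,
# hedged example of the behaviour `create_and_check_backbone` exercises above.
# Assumes a `transformers` install with Bit support; the randomly initialized
# default config is used only to inspect feature-map shapes.
def _bit_backbone_sketch():
    import torch
    from transformers import BitBackbone, BitConfig

    config = BitConfig(out_features=["stage2", "stage3", "stage4"])
    backbone = BitBackbone(config).eval()
    with torch.no_grad():
        outputs = backbone(torch.randn(1, 3, 64, 64))
    # One feature map per requested stage; channel counts follow config.hidden_sizes[1:].
    return [tuple(feature_map.shape) for feature_map in outputs.feature_maps]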
| 16 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__( self , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls , tokenizer : GPTaTokenizer , *args , **kwargs ):
        '''simple docstring'''
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs , **kwargs ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls , config : Dict ):
        '''simple docstring'''
        return cls(**config )

    def get_config( self ):
        '''simple docstring'''
        return {
            'vocab': self.vocab,
            'merges': self.merges,
            'max_length': self.max_length,
            'pad_token_id': self.pad_token_id,
        }

    def call( self , x , max_length : int = None ):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )

        return {'attention_mask': attention_mask, 'input_ids': input_ids}
| 134 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards ):
    """simple docstring"""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}


def main():
    """simple docstring"""
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )

    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )

    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )

    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
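

# --- Illustrative sketch, not part of the original test script: the same
# split can be checked in-process, without torch.distributed, by asking for
# each rank's view of the dataset explicitly.
def _split_by_node_sketch(world_size=2):
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    per_rank_sizes = []
    for rank in range(world_size):
        ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
        ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
        per_rank_sizes.append(sum(1 for _ in ds))
    # The per-rank counts add up to NUM_SHARDS * NUM_ITEMS_PER_SHARD.
    return per_rank_sizes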
| 354 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A__ : Any =logging.getLogger()
A__ : int =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase ( TestCasePlus ):
    def _create_dummy_data( self , data_dir ) -> None:
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"{split}.{field}" ) , """w""" ) as f:
                    f.write(content )

    def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ) -> dict:
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""" )
        data_dir = os.path.join(tmp_dir , """data""" )
        self._create_dummy_data(data_dir=data_dir )

        testargs = f" --data_dir {data_dir} --output_dir {output_dir} --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence --do_train --do_predict --n_val -1 --val_check_interval 1.0 --train_batch_size 2 --eval_batch_size 1 --max_source_length 25 --max_target_length 25 --val_max_target_length 25 --test_max_target_length 25 --label_smoothing 0.1 --dropout 0.1 --attention_dropout 0.1 --weight_decay 0.001 --adam_epsilon 1e-08 --max_grad_norm 0.1 --lr_scheduler polynomial --learning_rate 3e-04 --num_train_epochs 1 --warmup_steps 4 --gradient_accumulation_steps 1 --distributed-port 8787 --use_dummy_dataset 1 --distributed_retriever {distributed_retriever} ".split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}" )
            if is_apex_available():
                testargs.append("""--fp16""" )
        else:
            testargs.append("""--gpus=0""" )
            testargs.append("""--distributed_backend=ddp_cpu""" )
            testargs.append("""--num_processes=2""" )

        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )

        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result

    @require_torch_gpu
    def test_finetune_gpu( self ) -> None:
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multigpu( self ) -> None:
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieve( self ) -> None:
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieve( self ) -> None:
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 220 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset , length ):
    '''simple docstring'''
    for i in range(length ):
        _ = dataset[i]


@get_duration
def read_batch(dataset , length , batch_size ):
    '''simple docstring'''
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset , length , type ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset , length , type , batch_size ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
    ]

    functions_shuffled = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''' )
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'''list''': (100,)} , )
        print('''first set of iterations''' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )

        print('''shuffling dataset''' )
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
        for func, kwargs in functions_shuffled:
            print('''shuffled ''' , func.__name__ , str(kwargs ) )
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )

    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
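

# --- Illustrative sketch, not part of the original benchmark: `get_duration`
# comes from the local `utils` module, which is not shown here. A minimal
# stand-in consistent with how it is used above (each decorated reader
# returns its elapsed wall-clock time in seconds) could look like this:
def _get_duration_standin(func):
    import functools
    import time as _time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = _time.time()
        func(*args, **kwargs)
        return _time.time() - start

    return wrapper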
| 318 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self , pos_x , pos_y , goal_x , goal_y , parent ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False

    def search(self ):
        while self.node_queue:
            current_node = self.node_queue.pop(0 )

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )

            successors = self.get_successors(current_node )

            for node in successors:
                self.node_queue.append(node )

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self , parent ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors

    def retrace_path(self , node ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False

    def search(self ):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self , fwd_node , bwd_node ):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('''Unidirectional BFS computation time : ''', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
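

# --- Illustrative sketch, not part of the original module: a quick sanity
# check that both searchers return paths sharing the same start and goal
# cells (the intermediate cells of the two paths may legitimately differ).
def _bfs_sanity_sketch():
    start, end = (0, 0), (len(grid) - 1, len(grid[0]) - 1)
    one_way = BreadthFirstSearch(start, end).search()
    two_way = BidirectionalBreadthFirstSearch(start, end).search()
    return one_way[0] == two_way[0] and one_way[-1] == two_way[-1]  # expected True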
| 318 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
__lowerCamelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCamelCase = [4, 4, 4, 4]
__lowerCamelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
else:
__lowerCamelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCamelCase = 96
elif "small" in model_name:
__lowerCamelCase = 96
elif "base" in model_name:
__lowerCamelCase = 128
elif "large" in model_name:
__lowerCamelCase = 192
elif "xlarge" in model_name:
__lowerCamelCase = 256
elif "huge" in model_name:
__lowerCamelCase = 352
# set label information
__lowerCamelCase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCamelCase = 'imagenet-22k-id2label.json'
else:
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
__lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
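

# --- Illustrative sketch, not part of the original conversion script: the
# conversion above is essentially a key-renaming pass over a checkpoint's
# state dict. The same pattern on a toy dict, with a stand-in rename rule:
def _rename_keys_sketch():
    def toy_rename(name):
        return name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")

    state_dict = {"patch_embed.proj.weight": 0, "head.bias": 1}
    for key in state_dict.copy().keys():
        state_dict[toy_rename(key)] = state_dict.pop(key)
    return state_dict  # {'embeddings.patch_embeddings.projection.weight': 0, 'head.bias': 1}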
| 348 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
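

# --- Illustrative sketch, not part of the original test file: the
# `retain_grad` pattern used in the last test above, reduced to a toy module.
# Non-leaf activations drop their .grad unless retain_grad() is called before
# backward().
def _retain_grad_sketch():
    import torch

    layer = torch.nn.Linear(4, 4)
    hidden = layer(torch.randn(2, 4))
    hidden.retain_grad()
    hidden.sum().backward()
    return hidden.grad is not None  # True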
| 348 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self ):
        super().setUp()

        vocab_tokens = [
            '<unk>',
            '<cls>',
            '<sep>',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_tokenizer(self , **kwargs ):
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )

        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )

    def test_token_type_ids(self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )

            inputs = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
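

# --- Illustrative sketch, not part of the original test file: Funnel marks
# the leading <cls> token with token type 2 (the tokenizer's
# `cls_token_type_id`), unlike BERT's 0, which is exactly what the assertions
# above check. Pass any FunnelTokenizer instance:
def _funnel_token_types_sketch(tokenizer):
    inputs = tokenizer("UNwant\u00E9d,running")
    # Expected shape: [2] for <cls>, then 0s for the first segment.
    return inputs["token_type_ids"]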
| 73 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class A ( unittest.TestCase ):
    def test_find_backend( self ) -> None:
        """simple docstring"""
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
        self.assertIsNone(no_backend )

        simple_backend = find_backend(' if not is_tokenizers_available():' )
        self.assertEqual(simple_backend , 'tokenizers' )

        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():' )
        self.assertEqual(backend_with_underscore , 'tensorflow_text' )

        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
        self.assertEqual(double_backend , 'sentencepiece_and_tokenizers' )

        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
        self.assertEqual(double_backend_with_underscore , 'sentencepiece_and_tensorflow_text' )

        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
        self.assertEqual(triple_backend , 'sentencepiece_and_tokenizers_and_vision' )

    def test_read_init( self ) -> None:
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('tensorflow_text' , objects )
        self.assertIn('sentencepiece_and_tokenizers' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertModel' , objects['tf'] )
        self.assertIn('FlaxBertModel' , objects['flax'] )
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
        self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )

    def test_create_dummy_object( self ) -> None:
        """simple docstring"""
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )

        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n' )

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files( self ) -> None:
        """simple docstring"""
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
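

# --- Illustrative sketch, not part of the original test file: a simplified
# stand-in showing what the generated dummy objects do at runtime in place of
# transformers' DummyObject metaclass / requires_backends pair.
def _dummy_object_sketch():
    class ToyDummyMeta(type):
        def __getattr__(cls, key):
            raise ImportError(f"{cls.__name__} requires the torch backend." )

    class FakeClass(metaclass=ToyDummyMeta):
        _backends = ["torch"]

        def __init__(self, *args, **kwargs):
            raise ImportError(f"{type(self).__name__} requires the torch backend." )

    try:
        FakeClass()
    except ImportError as err:
        return str(err)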
| 199 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
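

# --- Illustrative sketch, not part of the original module: `_LazyModule`
# defers the heavy submodule imports until an attribute is first accessed.
# A reduced stand-in in the spirit of PEP 562; the relative
# `importlib.import_module` call assumes this file lives inside a package.
def _lazy_getattr_sketch(name):
    import importlib

    structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
    for submodule, exported_names in structure.items():
        if name in exported_names:
            module = importlib.import_module("." + submodule, __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")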
| 350 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['pixel_values']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 2_5_5 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 3_8_4}
_lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_lowerCamelCase = do_resize
_lowerCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
_lowerCamelCase = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
_lowerCamelCase = resample
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
_lowerCamelCase = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_lowerCamelCase = int(shortest_edge / crop_pct )
_lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_lowerCamelCase = resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__ , size=(shortest_edge, shortest_edge) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__ , size=(shortest_edge, shortest_edge) , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = crop_pct if crop_pct is not None else self.crop_pct
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase = image_std if image_std is not None else self.image_std
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , crop_pct=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_lowerCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
| 73 | 0 |
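The `resize` method in the image processor above switches between a crop path and a warp path at 384 pixels. A minimal pure-Python sketch of that size arithmetic, with the file's default crop_pct of 224/256 and no imaging library assumed; `plan_resize` is an illustrative name.

def plan_resize(shortest_edge: int, crop_pct: float = 224 / 256) -> dict:
    if shortest_edge < 384:
        # Inflate the shortest edge by 1/crop_pct, then center-crop back down.
        return {
            "resize_shortest_edge_to": int(shortest_edge / crop_pct),
            "center_crop_to": (shortest_edge, shortest_edge),
        }
    # At 384 or larger the image is warped straight to a square, no crop.
    return {"warp_to": (shortest_edge, shortest_edge)}


print(plan_resize(224))  # {'resize_shortest_edge_to': 256, 'center_crop_to': (224, 224)}
print(plan_resize(384))  # {'warp_to': (384, 384)}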
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase ( snake_case_ ):
_lowercase: str = ['''image_processor''']
_lowercase: Dict = '''SamImageProcessor'''
def __init__( self : Dict , __snake_case : List[Any] ) -> Optional[Any]:
super().__init__(__snake_case )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["""longest_edge"""]
def __call__( self : str , __snake_case : Dict=None , __snake_case : Dict=None , __snake_case : List[str]=None , __snake_case : Dict=None , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Any , ) -> BatchEncoding:
_lowerCAmelCase = self.image_processor(
__snake_case , return_tensors=__snake_case , **__snake_case , )
# pop arguments that are not used in the foward but used nevertheless
_lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__snake_case , """numpy""" ): # Checks if Torch or TF tensor
_lowerCAmelCase = original_sizes.numpy()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._check_and_preprocess_points(
input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , )
_lowerCAmelCase = self._normalize_and_convert(
__snake_case , __snake_case , input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , return_tensors=__snake_case , )
return encoding_image_processor
def lowercase__ ( self : List[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : List[Any]=None , __snake_case : Optional[Any]="pt" , ) -> Dict:
if input_points is not None:
if len(__snake_case ) != len(__snake_case ):
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] ) for point in input_points
]
else:
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case )
for point, original_size in zip(__snake_case , __snake_case )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_lowerCAmelCase , _lowerCAmelCase = self._pad_points_and_labels(__snake_case , __snake_case )
_lowerCAmelCase = np.array(__snake_case )
if input_labels is not None:
_lowerCAmelCase = np.array(__snake_case )
if input_boxes is not None:
if len(__snake_case ) != len(__snake_case ):
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] , is_bounding_box=__snake_case )
for box in input_boxes
]
else:
_lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case , is_bounding_box=__snake_case )
for box, original_size in zip(__snake_case , __snake_case )
]
_lowerCAmelCase = np.array(__snake_case )
if input_boxes is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__snake_case )
# boxes batch size of 1 by default
_lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__snake_case )
# boxes batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__snake_case )
# point batch size of 1 by default
_lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_lowerCAmelCase = torch.from_numpy(__snake_case )
# point batch size of 1 by default
_lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_lowerCAmelCase = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
_lowerCAmelCase = tf.expand_dims(__snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowercase__ ( self : str , __snake_case : List[Any] , __snake_case : Optional[Any] ) -> List[Any]:
_lowerCAmelCase = max([point.shape[0] for point in input_points] )
_lowerCAmelCase = []
for i, point in enumerate(__snake_case ):
if point.shape[0] != expected_nb_points:
_lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_lowerCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__snake_case )
_lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowercase__ ( self : Optional[int] , __snake_case : int , __snake_case : np.ndarray , __snake_case : Tuple , __snake_case : List[str]=False ) -> np.ndarray:
_lowerCAmelCase , _lowerCAmelCase = original_size
_lowerCAmelCase , _lowerCAmelCase = self.image_processor._get_preprocess_shape(__snake_case , longest_edge=__snake_case )
_lowerCAmelCase = deepcopy(__snake_case ).astype(__snake_case )
if is_bounding_box:
_lowerCAmelCase = coords.reshape(-1 , 2 , 2 )
_lowerCAmelCase = coords[..., 0] * (new_w / old_w)
_lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_lowerCAmelCase = coords.reshape(-1 , 4 )
return coords
def lowercase__ ( self : List[str] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : str=None , ) -> Tuple:
if input_points is not None:
if hasattr(__snake_case , """numpy""" ): # Checks for TF or Torch tensor
_lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__snake_case , __snake_case ) or not isinstance(input_points[0] , __snake_case ):
raise ValueError("""Input points must be a list of list of floating points.""" )
_lowerCAmelCase = [np.array(__snake_case ) for input_point in input_points]
else:
_lowerCAmelCase = None
if input_labels is not None:
if hasattr(__snake_case , """numpy""" ):
_lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__snake_case , __snake_case ) or not isinstance(input_labels[0] , __snake_case ):
raise ValueError("""Input labels must be a list of list integers.""" )
_lowerCAmelCase = [np.array(__snake_case ) for label in input_labels]
else:
_lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__snake_case , """numpy""" ):
_lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__snake_case , __snake_case )
or not isinstance(input_boxes[0] , __snake_case )
or not isinstance(input_boxes[0][0] , __snake_case )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
                _lowerCAmelCase = [np.array(__snake_case ).astype(np.float32 ) for box in input_boxes]
else:
_lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowercase__ ( self : Optional[int] ) -> Tuple:
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__snake_case ) )
def lowercase__ ( self : Optional[int] , *__snake_case : int , **__snake_case : str ) -> List[str]:
return self.image_processor.post_process_masks(*__snake_case , **__snake_case )
| 70 |
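The `_normalize_coordinates` helper in the SAM processor above maps prompt coordinates from the original image onto the preprocessed one. A hedged numpy sketch of that mapping; `get_preprocess_shape` imitates the image processor helper only in spirit, and the longest-edge value of 1024 is an assumption about the default configuration.

import numpy as np


def get_preprocess_shape(old_h: int, old_w: int, longest_edge: int = 1024):
    # Scale so the longest side hits longest_edge, rounding half up.
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)


def normalize_coords(coords: np.ndarray, original_size, longest_edge: int = 1024):
    old_h, old_w = original_size
    new_h, new_w = get_preprocess_shape(old_h, old_w, longest_edge)
    coords = coords.astype(float)
    coords[..., 0] *= new_w / old_w  # x
    coords[..., 1] *= new_h / old_h  # y
    return coords


print(normalize_coords(np.array([[500.0, 375.0]]), original_size=(750, 1000)))  # [[512. 384.]]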
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A__ : Dict ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A__ : Tuple =concatenate_datasets
A__ : Dict =DownloadConfig
A__ : int =DownloadManager
A__ : Union[str, Any] =DownloadMode
A__ : Tuple =DownloadConfig
A__ : Optional[Any] =DownloadMode
A__ : str =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 70 | 1 |
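The import-time guards above gate `datasets` on the interpreter and `pyarrow` versions. A hedged, generic sketch of the same kind of check for any installed distribution; it assumes Python 3.8+ for `importlib.metadata` and an installed `packaging`, and the threshold in the final call is illustrative.

import importlib.metadata

from packaging import version


def require_min_version(dist: str, minimum: str) -> None:
    installed = version.parse(importlib.metadata.version(dist))
    if installed < version.parse(minimum):
        raise ImportError(f"{dist}>={minimum} is required, found {installed}")


require_min_version("packaging", "20.0")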
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_lowercase : Optional[int] =[
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC ="UperNetConfig"
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 0 , __lowercase = False , __lowercase = 1 , ) -> None:
"""simple docstring"""
super().__init__()
        a__ : List[Any] = nn.Conv2d(
            in_channels=__lowercase , out_channels=__lowercase , kernel_size=__lowercase , padding=__lowercase , bias=__lowercase , dilation=__lowercase , )
        a__ : Optional[int] = nn.BatchNorm2d(__lowercase )
a__ : Any = nn.ReLU()
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : Dict = self.conv(__lowercase )
a__ : str = self.batch_norm(__lowercase )
a__ : Optional[int] = self.activation(__lowercase )
return output
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase ) -> None:
"""simple docstring"""
super().__init__()
a__ : Optional[Any] = [
            nn.AdaptiveAvgPool2d(__lowercase ),
UperNetConvModule(__lowercase , __lowercase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__lowercase ) , __lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : Union[str, Any] = input
for layer in self.layers:
a__ : str = layer(__lowercase )
return hidden_state
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> None:
"""simple docstring"""
super().__init__()
a__ : Optional[Any] = pool_scales
a__ : int = align_corners
a__ : List[Any] = in_channels
a__ : Dict = channels
a__ : Optional[int] = []
for i, pool_scale in enumerate(__lowercase ):
a__ : int = UperNetPyramidPoolingBlock(pool_scale=__lowercase , in_channels=__lowercase , channels=__lowercase )
self.blocks.append(__lowercase )
self.add_module(str(__lowercase ) , __lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[torch.Tensor]:
"""simple docstring"""
a__ : Optional[Any] = []
for ppm in self.blocks:
a__ : List[str] = ppm(__lowercase )
a__ : Any = nn.functional.interpolate(
__lowercase , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(__lowercase )
return ppm_outs
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
super().__init__()
a__ : Dict = config
a__ : List[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
a__ : Any = in_channels
a__ : Tuple = config.hidden_size
a__ : Union[str, Any] = False
        a__ : int = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a__ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a__ : Any = nn.ModuleList()
a__ : Dict = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a__ : Any = UperNetConvModule(__lowercase , self.channels , kernel_size=1 )
a__ : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__lowercase )
self.fpn_convs.append(__lowercase )
a__ : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
        if isinstance(__lowercase , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Any:
"""simple docstring"""
a__ : Optional[Any] = inputs[-1]
a__ : Any = [x]
psp_outs.extend(self.psp_modules(__lowercase ) )
a__ : str = torch.cat(__lowercase , dim=1 )
a__ : Optional[Any] = self.bottleneck(__lowercase )
return output
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : int = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__lowercase ) )
# build top-down path
a__ : List[str] = len(__lowercase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a__ : str = laterals[i - 1].shape[2:]
a__ : Optional[int] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__lowercase , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
a__ : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a__ : Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
a__ : Any = torch.cat(__lowercase , dim=1 )
a__ : Optional[int] = self.fpn_bottleneck(__lowercase )
a__ : Optional[Any] = self.classifier(__lowercase )
return output
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase = 2 , __lowercase = 3 , __lowercase = 1 ) -> None:
"""simple docstring"""
super().__init__()
a__ : Union[str, Any] = config
a__ : Union[str, Any] = config.auxiliary_in_channels
a__ : Tuple = config.auxiliary_channels
a__ : str = config.auxiliary_num_convs
a__ : Tuple = config.auxiliary_concat_input
a__ : str = in_index
a__ : Tuple = (kernel_size // 2) * dilation
a__ : List[str] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__lowercase , padding=__lowercase , dilation=__lowercase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__lowercase , padding=__lowercase , dilation=__lowercase ) )
if self.num_convs == 0:
a__ : int = nn.Identity()
else:
a__ : int = nn.Sequential(*__lowercase )
if self.concat_input:
a__ : Optional[int] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__lowercase , padding=kernel_size // 2 )
        a__ : Union[str, Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
        if isinstance(__lowercase , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : str = encoder_hidden_states[self.in_index]
a__ : List[Any] = self.convs(__lowercase )
if self.concat_input:
a__ : Tuple = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a__ : List[Any] = self.classifier(__lowercase )
return output
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :Union[str, Any] = UperNetConfig
__lowerCAmelCase :str = "pixel_values"
__lowerCAmelCase :int = True
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Dict:
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=False ) -> List[str]:
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
a__ : Dict = value
_lowercase : Union[str, Any] =r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : List[Any] =r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , A__ , )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> List[Any]:
"""simple docstring"""
super().__init__(__lowercase )
a__ : Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
a__ : Tuple = UperNetHead(__lowercase , in_channels=self.backbone.channels )
a__ : Union[str, Any] = UperNetFCNHead(__lowercase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=__lowercase , config_class=_CONFIG_FOR_DOC )
def SCREAMING_SNAKE_CASE__( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
a__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
a__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ : Union[str, Any] = output_attentions if output_attentions is not None else self.config.output_attentions
a__ : int = self.backbone.forward_with_filtered_kwargs(
__lowercase , output_hidden_states=__lowercase , output_attentions=__lowercase )
a__ : Any = outputs.feature_maps
a__ : Tuple = self.decode_head(__lowercase )
a__ : Any = nn.functional.interpolate(__lowercase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowercase )
a__ : Tuple = None
if self.auxiliary_head is not None:
a__ : Dict = self.auxiliary_head(__lowercase )
a__ : int = nn.functional.interpolate(
__lowercase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowercase )
a__ : List[Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
a__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
a__ : int = loss_fct(__lowercase , __lowercase )
a__ : List[Any] = loss_fct(__lowercase , __lowercase )
a__ : Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
a__ : Tuple = (logits,) + outputs[1:]
else:
a__ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 266 |
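A hedged PyTorch sketch of the pyramid-pooling idea behind `UperNetPyramidPoolingModule` above: pool the feature map to a few grid sizes, project each with a 1x1 convolution, upsample back to the input resolution, and concatenate with the input. The channel counts and pool scales here are illustrative, not the model's.

import torch
from torch import nn


class TinyPPM(nn.Module):
    def __init__(self, in_channels=64, channels=16, pool_scales=(1, 2, 3, 6)):
        super().__init__()
        self.blocks = nn.ModuleList(
            nn.Sequential(nn.AdaptiveAvgPool2d(s), nn.Conv2d(in_channels, channels, 1))
            for s in pool_scales
        )

    def forward(self, x):
        outs = [x]
        for block in self.blocks:
            y = block(x)
            # Each pooled branch is brought back to the input's spatial size.
            outs.append(nn.functional.interpolate(y, size=x.shape[2:], mode="bilinear", align_corners=False))
        return torch.cat(outs, dim=1)


features = torch.randn(1, 64, 32, 32)
print(TinyPPM()(features).shape)  # torch.Size([1, 128, 32, 32])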
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class snake_case__ (Dataset ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> int:
"""simple docstring"""
a__ : Tuple = params
a__ : str = np.array(__lowercase )
a__ : List[Any] = np.array([len(__lowercase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowercase ) -> Any:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.lengths )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : int = self.params.max_model_input_size
a__ : int = self.lengths > max_len
logger.info(F'''Splitting {sum(__lowercase )} too long sequences.''' )
def divide_chunks(__lowercase , __lowercase ):
return [l[i : i + n] for i in range(0 , len(__lowercase ) , __lowercase )]
a__ : Any = []
a__ : Optional[int] = []
if self.params.mlm:
a__ , a__ : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
a__ , a__ : Dict = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
a__ : int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
a__ : str = np.insert(__lowercase , 0 , __lowercase )
if sub_s[-1] != sep_id:
a__ : List[str] = np.insert(__lowercase , len(__lowercase ) , __lowercase )
assert len(__lowercase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowercase )
new_tok_ids.extend(__lowercase )
new_lengths.extend([len(__lowercase ) for l in sub_seqs] )
a__ : Optional[int] = np.array(__lowercase )
a__ : Any = np.array(__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Union[str, Any] = len(self )
a__ : List[str] = self.lengths > 1_1
a__ : Dict = self.token_ids[indices]
a__ : List[str] = self.lengths[indices]
a__ : int = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
a__ : Union[str, Any] = self.params.special_tok_ids["""unk_token"""]
a__ : List[Any] = len(self )
a__ : Optional[int] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
a__ : Optional[Any] = (unk_occs / self.lengths) < 0.5
a__ : Tuple = self.token_ids[indices]
a__ : Union[str, Any] = self.lengths[indices]
a__ : Tuple = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
a__ : Optional[int] = [t[0] for t in batch]
a__ : Any = [t[1] for t in batch]
assert len(__lowercase ) == len(__lowercase )
# Max for paddings
a__ : List[Any] = max(__lowercase )
# Pad token ids
if self.params.mlm:
a__ : int = self.params.special_tok_ids["""pad_token"""]
else:
a__ : List[str] = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(__lowercase )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
a__ : List[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
a__ : Optional[int] = torch.tensor(__lowercase ) # (bs)
return tk_t, lg_t
| 266 | 1 |
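The final collate method in the dataset above pads every sequence in a batch to the longest one before stacking. A minimal stand-alone sketch of that step, with PyTorch assumed and `pad_batch` as an illustrative name.

import torch


def pad_batch(token_ids, pad_idx=0):
    max_len = max(len(t) for t in token_ids)
    # Right-pad each sequence with pad_idx up to the batch maximum.
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    lengths = [len(t) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)


tk, lg = pad_batch([[5, 6, 7], [5, 6]])
print(tk.shape, lg.tolist())  # torch.Size([2, 3]) [3, 2]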
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = PhobertTokenizer
UpperCAmelCase = False
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
A__ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
A__ = ["""#version: 0.2""", """l à</w>"""]
A__ = {"""unk_token""": """<unk>"""}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_UpperCamelCase ) )
def UpperCamelCase ( self: Union[str, Any] , **UpperCamelCase: Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = """Tôi là VinAI Research"""
A__ = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = """Tôi là VinAI Research"""
A__ = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
A__ = tokenizer.tokenize(_UpperCamelCase )
print(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
A__ = tokens + [tokenizer.unk_token]
A__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
| 335 |
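The expected tokens in the test above carry "@@" continuation markers, the subword-nmt BPE convention PhoBERT uses. Folding them back into whole words is a one-liner; a pure-Python sketch follows.

def detokenize_bpe(tokens):
    # "T@@ ô@@ i" -> "Tôi": a trailing "@@" glues a piece onto the next one.
    return " ".join(tokens).replace("@@ ", "")


print(detokenize_bpe(["T@@", "ô@@", "i", "l@@", "à"]))  # Tôi là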
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
_lowercase =None
_lowercase =BloomTokenizerFast
_lowercase =BloomTokenizerFast
_lowercase =True
_lowercase =False
_lowercase ='''tokenizer_file'''
_lowercase ={'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __a ( self ) -> Dict:
super().setUp()
lowerCAmelCase_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **_UpperCamelCase ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase_ = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCAmelCase_ = tokenizer.batch_encode_plus(_UpperCamelCase )["input_ids"]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self , _UpperCamelCase=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowerCAmelCase_ = None # Hotfixing padding = None
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=_UpperCamelCase )
lowerCAmelCase_ = next(iter(_UpperCamelCase ) )["premise"] # pick up one data
lowerCAmelCase_ = list(sample_data.values() )
lowerCAmelCase_ = list(map(tokenizer.encode , _UpperCamelCase ) )
lowerCAmelCase_ = [tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) for x in output_tokens]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> List[Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 231 | 0 |
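A hedged round-trip check in the spirit of the Bloom tokenizer test above; it assumes network access to fetch the "bigscience/tokenizer" files and an installed `tokenizers` backend. Byte-level BPE makes encode/decode lossless here.

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tokenizer("The quick brown fox")["input_ids"]
assert tokenizer.decode(ids, clean_up_tokenization_spaces=False) == "The quick brown fox"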
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
_UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" )
if key.startswith("module.decoder" ):
_UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" )
if "norm" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
_UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" )
if "layer_norm1" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )]
_UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" )
if "attn.q" in key:
_UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace("attn", "attention.self" )
if "fc1" in key:
_UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" )
if "fc2" in key:
_UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" )
if "linear_pred" in key:
_UpperCAmelCase : Any = key.replace("linear_pred", "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" )
_UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" )
if "bot_conv" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" )
if "skip_conv1" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" )
if "skip_conv2" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" )
if "fusion1" in key:
_UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" )
if "fusion2" in key:
_UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" )
if "fusion3" in key:
_UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" )
if "fusion" in key and "conv" in key:
_UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
_UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" )
_UpperCAmelCase : int = value
return new_state_dict
def __UpperCAmelCase ( a_: str, a_: List[Any] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :]
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ):
_UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) )
# rename keys
_UpperCAmelCase : List[str] = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_, a_ )
# create HuggingFace model and load state dict
_UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
_UpperCAmelCase : Dict = model(a_ )
_UpperCAmelCase : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_UpperCAmelCase : Tuple = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
_UpperCAmelCase : Dict = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, )
image_processor.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__a = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 364 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[Any] = 10
_UpperCAmelCase : int = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_UpperCAmelCase : List[str] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(a_ ) ),
}, features=a_, )
return dataset
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=a_ )
return filename
# FILE_CONTENT + files
__a = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt"
_UpperCAmelCase : Tuple = FILE_CONTENT
with open(a_, "w" ) as f:
f.write(a_ )
return filename
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
    import bz2
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" )
    with bz2.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
import gzip
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_UpperCAmelCase : Any = bytes(a_, "utf-8" )
with gzip.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_UpperCAmelCase : str = bytes(a_, "utf-8" )
        with lz4.frame.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Any ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(a_, "w" ) as archive:
archive.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: List[str] ):
import tarfile
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
import lzma
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_UpperCAmelCase : List[str] = bytes(a_, "utf-8" )
with lzma.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: Tuple ):
import zipfile
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_UpperCAmelCase : int = bytes(a_, "utf-8" )
with zstd.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml"
_UpperCAmelCase : Tuple = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a_, "w" ) as f:
f.write(a_ )
return filename
__a = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__a = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__a = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__a = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__a = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : str = datasets.Dataset.from_dict(a_ )
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(a_ ) ) as con:
_UpperCAmelCase : List[Any] = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str, a_: str ):
    import bz2
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a_, "rb" ) as f:
_UpperCAmelCase : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ):
_UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ):
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_UpperCAmelCase : Dict = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(a_, "wb" ) as f:
_UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ )
_UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ )
writer.write_table(a_ )
writer.close()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : str = {"data": DATA}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
    path = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
    with open(path, "w" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ):
import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
    with open(a_, "rb" ) as orig_file:
        with gzip.open(path, "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ):
import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
    with open(a_, "rb" ) as orig_file:
        with gzip.open(path, "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w" ) as f:
f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : List[str] = ["0", "1", "2", "3"]
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Dict = ["0", "1", "2", "3"]
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = ["0", "1", "2", "3"]
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.basename("unsupported.ext" ) )
f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a_, "w", encoding="utf-8" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Optional[Any] ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
    with zipfile.ZipFile(path, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
    return data_dir
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Any:
# A mock response for an HTTP head request to emulate server down
_A : Optional[int] = mock.Mock()
_A : Optional[Any] = 500
_A : Dict = {}
_A : Union[str, Any] = HTTPError
_A : List[Any] = {}
# Download this model to make sure it's in the cache.
_A : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head:
_A : List[Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
_A : str = mock.Mock()
_A : Any = 500
_A : Optional[int] = {}
_A : List[str] = HTTPError
_A : int = {}
# Download this model to make sure it's in the cache.
        _A : int = GPT2TokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head:
            _A : str = GPT2TokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def a__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
try:
_A : Tuple = tempfile.mktemp()
with open(_a , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , _a )
_A : str = AlbertTokenizer.from_pretrained(_a )
finally:
os.remove(_a )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , _a )
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def a__ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
_A : Dict = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class lowercase ( unittest.TestCase ):
_a = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def a__ ( cls ) -> Tuple:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def a__ ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = os.path.join(_a , """vocab.txt""" )
with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_A : Optional[Any] = BertTokenizer(_a )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
_A : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a , repo_id="""test-tokenizer""" , push_to_hub=_a , use_auth_token=self._token )
_A : List[str] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[int] = os.path.join(_a , """vocab.txt""" )
with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_A : Tuple = BertTokenizer(_a )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
_A : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_a , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=_a , use_auth_token=self._token )
_A : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def a__ ( self ) -> str:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = os.path.join(_a , """vocab.txt""" )
with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_A : str = CustomTokenizer(_a )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
_A : Optional[int] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Any = os.path.join(_a , """vocab.txt""" )
with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_A : Dict = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
_A : Dict = CustomTokenizerFast.from_pretrained(_a )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
_A : Tuple = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
_A : Tuple = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=_a , trust_remote_code=_a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
        trie = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def a__ ( self ) -> Union[str, Any]:
        trie = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def a__ ( self ) -> Dict:
        trie = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def a__ ( self ) -> Dict:
        trie = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def a__ ( self ) -> List[Any]:
        trie = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def a__ ( self ) -> List[Any]:
        trie = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def a__ ( self ) -> int:
        trie = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def a__ ( self ) -> List[Any]:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ["""AB""", """C"""] )
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x]:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
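# Hand-checked expected output for the sample graph above: removing vertex 2,
# 3, or 5 disconnects the graph, so the script prints 2, 3 and 5, one per
# line. The root of a DFS tree (vertex 0 here) is reported only when it has
# more than one tree child, which is not the case for this graph.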
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """simple docstring"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """camembert"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCamelCase ( lowerCAmelCase ):
@property
def UpperCAmelCase_ ( self :int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_lowerCAmelCase : List[Any] = "scheduler_config.json"
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
UpperCAmelCase_ = 3
UpperCAmelCase_ = 4
UpperCAmelCase_ = 5
@dataclass
class _UpperCamelCase ( lowerCAmelCase ):
    prev_sample: jnp.ndarray
class _UpperCamelCase :
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , lowerCamelCase :Dict[str, Any] = None , lowerCamelCase :Optional[str] = None , lowerCamelCase :Any=False , **lowerCamelCase :Dict , ) -> str:
UpperCAmelCase__ , UpperCAmelCase__ = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , )
UpperCAmelCase__ , UpperCAmelCase__ = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase )
if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ):
UpperCAmelCase__ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :Union[str, os.PathLike] , lowerCamelCase :bool = False , **lowerCamelCase :Optional[int] ) -> Dict:
self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase )
@property
def UpperCAmelCase_ ( self :List[Any] ) -> Any:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls :str ) -> Optional[int]:
UpperCAmelCase__ = list(set([cls.__name__] + cls._compatibles ) )
UpperCAmelCase__ = importlib.import_module(__name__.split("." )[0] )
UpperCAmelCase__ = [
getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]):
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32):
    """simple docstring"""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
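# Usage sketch for the function above (illustrative, assuming jax is
# installed): betas_for_alpha_bar(4) returns a length-4 jnp.ndarray of betas
# for a 4-step cosine schedule, each entry capped at max_beta (0.999).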
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _a ( unittest.TestCase ):
def snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp )
def snake_case ( self : str, **lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : Union[str, Any], **lowerCAmelCase__ : Tuple ) -> str:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : Any, **lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : str ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ) -> int:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        return image_inputs
def snake_case ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : int = self.get_rust_tokenizer()
_UpperCamelCase : int = self.get_image_processor()
_UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__ )
def snake_case ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : Dict = self.get_tokenizer(cls_token='''(CLS)''', sep_token='''(SEP)''' )
_UpperCamelCase : List[str] = self.get_image_processor(do_normalize=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token='''(CLS)''', sep_token='''(SEP)''', do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCAmelCase__ )
def snake_case ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = self.get_image_processor()
_UpperCamelCase : str = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.prepare_image_inputs()
_UpperCamelCase : Any = image_processor(lowerCAmelCase__, return_tensors='''np''' )
_UpperCamelCase : Any = processor(images=lowerCAmelCase__, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def snake_case ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_image_processor()
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Tuple = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : List[str] = processor(text=lowerCAmelCase__ )
_UpperCamelCase : Any = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_image_processor()
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCamelCase : str = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def snake_case ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : int = self.get_image_processor()
_UpperCamelCase : int = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase : List[Any] = processor.batch_decode(lowerCAmelCase__ )
_UpperCamelCase : Dict = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
def snake_case ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Any = self.get_image_processor()
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : int = self.prepare_image_inputs()
_UpperCamelCase : Dict = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : int ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
        _UpperCamelCase : Dict = UNet2DConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=3_2, )
_UpperCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCAmelCase__, set_alpha_to_one=lowerCAmelCase__, )
torch.manual_seed(0 )
_UpperCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_UpperCamelCase : str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, projection_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_0_0_2, )
_UpperCamelCase : List[Any] = CLIPTextModel(lowerCAmelCase__ )
_UpperCamelCase : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_UpperCamelCase : str = 7_7
_UpperCamelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case ( self : Dict, lowerCAmelCase__ : Any, lowerCAmelCase__ : int=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith('''mps''' ):
_UpperCamelCase : Any = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case ( self : List[Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : int = self.get_dummy_components()
torch.manual_seed(0 )
_UpperCamelCase : Any = RobertaSeriesConfig(
hidden_size=3_2, project_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_0_0_2, )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCamelCase : Tuple = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCamelCase : str = text_encoder
_UpperCamelCase : List[Any] = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCamelCase : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = '''A photo of an astronaut'''
_UpperCamelCase : Any = alt_pipe(**lowerCAmelCase__ )
_UpperCamelCase : Any = output.images
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCamelCase : List[Any] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Optional[Any] = self.get_dummy_components()
_UpperCamelCase : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
_UpperCamelCase : int = RobertaSeriesConfig(
hidden_size=3_2, project_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_0_0_2, )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCamelCase : Tuple = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCamelCase : int = text_encoder
_UpperCamelCase : str = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCamelCase : List[str] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = alt_pipe(**lowerCAmelCase__ )
_UpperCamelCase : List[str] = output.images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCamelCase : List[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase : str = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', safety_checker=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : int = '''A painting of a squirrel eating a burger'''
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Dict = alt_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2_0, output_type='''np''' )
_UpperCamelCase : Optional[int] = output.images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCamelCase : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase : Any = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''', subfolder='''scheduler''' )
_UpperCamelCase : Dict = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__ )
_UpperCamelCase : Dict = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = '''A painting of a squirrel eating a burger'''
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = alt_pipe([prompt], generator=lowerCAmelCase__, num_inference_steps=2, output_type='''numpy''' )
_UpperCamelCase : Tuple = output.images
_UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCamelCase : str = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self :Optional[Any] , snake_case :Optional[Any]="</s>" , snake_case :Dict="<unk>" , snake_case :List[str]="<pad>" , snake_case :Optional[Any]=125 , snake_case :Any=None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
A_ : Optional[int] = [f"<extra_id_{i}>" for i in range(snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A_ : Union[str, Any] = len(set(filter(lambda snake_case : bool("extra_id" in str(snake_case ) ) , snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
A_ : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
A_ : List[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
super().__init__(
eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , **snake_case , )
A_ : str = extra_ids
A_ : Tuple = 2**8 # utf is 8 bits
# define special tokens dict
A_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
A_ : List[str] = len(self.special_tokens_encoder )
A_ : str = len(snake_case )
for i, token in enumerate(snake_case ):
A_ : str = self.vocab_size + i - n
A_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def SCREAMING_SNAKE_CASE ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def SCREAMING_SNAKE_CASE ( self , token_ids: List[int] ):
        '''simple docstring'''
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def SCREAMING_SNAKE_CASE ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def SCREAMING_SNAKE_CASE ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def SCREAMING_SNAKE_CASE ( self , text: str ):
        '''simple docstring'''
        tokens = [chr(i ) for i in text.encode("utf-8" )]
        return tokens
    def SCREAMING_SNAKE_CASE ( self , token ):
        '''simple docstring'''
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def SCREAMING_SNAKE_CASE ( self , index ):
        '''simple docstring'''
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def SCREAMING_SNAKE_CASE ( self , tokens ):
        '''simple docstring'''
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8" )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode("utf-8" , errors="ignore" )
        return string
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
return ()
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def SCREAMING_SNAKE_CASE ( self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs )
        return config
    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3
    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , __UpperCamelCase , )
        super().__init__(*__UpperCamelCase , **__UpperCamelCase )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCamelCase : Any = [self.sep_token_id]
__UpperCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
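    # Layout sketch (comment only): a single sequence becomes [CLS] A [SEP];
    # a pair becomes [CLS] A [SEP] B [SEP], the standard BERT-style scheme.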
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCamelCase : Tuple = [self.sep_token_id]
__UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
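    # Segment-id sketch (comment only): a single sequence gets all 0s over
    # [CLS] A [SEP]; for a pair, [CLS] A [SEP] gets 0s and B [SEP] gets 1s.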
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(__UpperCamelCase ) )
return
__UpperCamelCase : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,) | 171 | 1 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
assert isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
lowerCamelCase : Tuple = tmp_path / "cache"
lowerCamelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase : Union[str, Any] = SqlDatasetReader(
"dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ,keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
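# keep_in_memory=True makes the reader copy the Arrow table into RAM instead of
# memory-mapping the on-disk cache, which is what the paired
# assert_arrow_memory_* context managers in the test above verify.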
@require_sqlalchemy
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : Tuple = tmp_path / "cache"
lowerCamelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features
lowerCamelCase : int = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,features=_SCREAMING_SNAKE_CASE ,cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ) -> Dict:
with contextlib.closing(sqlitea.connect(_SCREAMING_SNAKE_CASE ) ) as con:
lowerCamelCase : List[str] = con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
lowerCamelCase : str = tmp_path / "cache"
lowerCamelCase : str = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" )
lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=1 ).write()
lowerCamelCase : Optional[Any] = iter_sql_file(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = tmp_path / "cache"
lowerCamelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" )
lowerCamelCase : List[str] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=2 ).write()
lowerCamelCase : List[str] = iter_sql_file(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowerCamelCase : List[Any] = tmp_path / "cache"
lowerCamelCase : List[str] = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" )
lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read()
with pytest.raises(_SCREAMING_SNAKE_CASE ):
SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=0 ).write()
| 48 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int | float:
"""simple docstring"""
if len(__snake_case ) == 0:
raise ValueError('''find_max() arg is an empty sequence''' )
if (
left >= len(__snake_case )
or left < -len(__snake_case )
or right >= len(__snake_case )
or right < -len(__snake_case )
):
raise IndexError('''list index out of range''' )
if left == right:
return nums[left]
_UpperCamelCase = (left + right) >> 1 # the middle
_UpperCamelCase = find_max(__snake_case, __snake_case, __snake_case ) # find max in range[left, mid]
_UpperCamelCase = find_max(__snake_case, mid + 1, __snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
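# Worked example (comment only): find_max([1, 5, 3], 0, 2) splits at mid=1,
# recurses into [1, 5] and [3], and returns max(5, 3) = 5. Every element is
# still visited (O(n) total work), but the recursion depth is only O(log n).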
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 194 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
__A = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def snake_case_(_UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
_snake_case = torch.load(_UpperCamelCase , map_location='''cpu''' )
return sd
def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ) -> List[Any]:
"""simple docstring"""
_snake_case = OrderedDict()
_snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_snake_case = key
for name_pair in rename_keys_prefix:
_snake_case = new_key.replace(name_pair[0] , name_pair[1] )
_snake_case = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
_snake_case = new_d['''cls.predictions.bias''']
return new_d
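# Example of the prefix renaming applied above (illustrative):
#   "bert.bert.encoder.layer.0.attention.self.query.weight"
#       -> "visual_bert.encoder.layer.0.attention.self.query.weight"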
@torch.no_grad()
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
_snake_case = '''pretraining'''
if "vcr" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 2_048}
elif "vqa" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 2_048}
elif "nlvr" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 1_024}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 512}
_snake_case = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 2_048}
_snake_case = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
_snake_case = {'''visual_embedding_dim''': 2_048, '''num_labels''': 3_129}
_snake_case = '''vqa'''
elif "nlvr" in checkpoint_path:
_snake_case = {
'''visual_embedding_dim''': 1_024,
'''num_labels''': 2,
}
_snake_case = '''nlvr'''
_snake_case = VisualBertConfig(**_UpperCamelCase )
# Load State Dict
_snake_case = load_state_dict(_UpperCamelCase )
_snake_case = get_new_dict(_UpperCamelCase , _UpperCamelCase )
if model_type == "pretraining":
_snake_case = VisualBertForPreTraining(_UpperCamelCase )
elif model_type == "vqa":
_snake_case = VisualBertForQuestionAnswering(_UpperCamelCase )
elif model_type == "nlvr":
_snake_case = VisualBertForVisualReasoning(_UpperCamelCase )
elif model_type == "multichoice":
_snake_case = VisualBertForMultipleChoice(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# Save Checkpoints
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
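# Hypothetical invocation (the script filename is an assumption; the two
# positional arguments match the parser defined below):
#   python convert_visual_bert_original_checkpoint.py vqa_fine_tuned.th ./visual-bert-vqa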
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
__A = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 360 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 278 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS : int = 4
NUM_ITEMS_PER_SHARD : int = 3
class a_ ( _lowerCAmelCase ):
pass
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ):
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def UpperCAmelCase_ ( ):
lowercase_ :Any = int(os.environ["RANK"] )
lowercase_ :str = int(os.environ["WORLD_SIZE"] )
lowercase_ :int = ArgumentParser()
parser.add_argument("--streaming" ,type=__lowerCamelCase )
parser.add_argument("--local_rank" ,type=__lowerCamelCase )
parser.add_argument("--num_workers" ,type=__lowerCamelCase ,default=0 )
lowercase_ :Dict = parser.parse_args()
lowercase_ :Optional[int] = args.streaming
lowercase_ :List[Any] = args.num_workers
lowercase_ :Tuple = {"shards": [F'shard_{shard_idx}' for shard_idx in range(__lowerCamelCase )]}
lowercase_ :Tuple = IterableDataset.from_generator(__lowerCamelCase ,gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase_ :Optional[Any] = Dataset.from_list(list(__lowerCamelCase ) )
lowercase_ :Union[str, Any] = split_dataset_by_node(__lowerCamelCase ,rank=__lowerCamelCase ,world_size=__lowerCamelCase )
lowercase_ :List[str] = torch.utils.data.DataLoader(__lowerCamelCase ,num_workers=__lowerCamelCase )
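    # Expected shard size under contiguous splitting (a restatement of the check
    # below, assuming split_dataset_by_node gives the remainder to the
    # lowest-ranked workers): floor(full/world) plus 1 extra when rank < remainder.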
lowercase_ :Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase_ :Optional[Any] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase_ :Union[str, Any] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 223 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : Dict=True ,__lowerCamelCase : List[Any]="pt" ):
lowercase_ :Dict = {"add_prefix_space": True} if isinstance(__lowerCamelCase ,__lowerCamelCase ) and not line.startswith(" " ) else {}
lowercase_ :str = padding_side
return tokenizer(
[line] ,max_length=__lowerCamelCase ,padding="max_length" if pad_to_max_length else None ,truncation=__lowerCamelCase ,return_tensors=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,)
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Dict ,__lowerCamelCase : str=None ,):
lowercase_ :Optional[int] = input_ids.ne(__lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
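# Toy illustration (comment only): with pad_token_id = 0 and
# input_ids = [[5, 6, 0], [7, 0, 0]], only the last column is all-padding, so
# trim_batch keeps columns 0-1 of the ids (and of the attention mask, if given).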
class a_ ( _lowerCAmelCase ):
def __init__( self : Optional[int] , lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : str="train" , lowercase : Dict=None , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : int="" , ):
"""simple docstring"""
super().__init__()
lowercase_ :List[Any] = Path(lowercase ).joinpath(type_path + ".source" )
lowercase_ :Dict = Path(lowercase ).joinpath(type_path + ".target" )
lowercase_ :Optional[int] = self.get_char_lens(self.src_file )
lowercase_ :List[str] = max_source_length
lowercase_ :str = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
lowercase_ :int = tokenizer
lowercase_ :Dict = prefix
if n_obs is not None:
lowercase_ :Union[str, Any] = self.src_lens[:n_obs]
lowercase_ :Optional[int] = src_lang
lowercase_ :str = tgt_lang
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : str , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Tuple = index + 1 # linecache starts at 1
lowercase_ :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , lowercase ).rstrip("\n" )
lowercase_ :List[str] = linecache.getline(str(self.tgt_file ) , lowercase ).rstrip("\n" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase_ :List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase ) else self.tokenizer
)
lowercase_ :int = self.tokenizer.generator if isinstance(self.tokenizer , lowercase ) else self.tokenizer
lowercase_ :List[str] = encode_line(lowercase , lowercase , self.max_source_length , "right" )
lowercase_ :Any = encode_line(lowercase , lowercase , self.max_target_length , "right" )
lowercase_ :Dict = source_inputs["input_ids"].squeeze()
lowercase_ :Tuple = target_inputs["input_ids"].squeeze()
lowercase_ :Optional[int] = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase__ ( lowercase : Union[str, Any] ):
"""simple docstring"""
return [len(lowercase ) for x in Path(lowercase ).open().readlines()]
def lowercase__ ( self : str , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = torch.stack([x["input_ids"] for x in batch] )
lowercase_ :Dict = torch.stack([x["attention_mask"] for x in batch] )
lowercase_ :List[str] = torch.stack([x["decoder_input_ids"] for x in batch] )
lowercase_ :Any = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
lowercase_ :str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
lowercase_ :Union[str, Any] = trim_batch(lowercase , lowercase )
lowercase_ , lowercase_ :Optional[Any] = trim_batch(lowercase , lowercase , attention_mask=lowercase )
lowercase_ :Tuple = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
lowerCAmelCase : List[str] =getLogger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : List[List] ):
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def UpperCAmelCase_ ( __lowerCamelCase : str ):
lowercase_ :List[str] = get_git_info()
save_json(__lowerCamelCase ,os.path.join(__lowerCamelCase ,"git_log.json" ) )
def UpperCAmelCase_ ( __lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[Any]=4 ,**__lowerCamelCase : List[str] ):
with open(__lowerCamelCase ,"w" ) as f:
json.dump(__lowerCamelCase ,__lowerCamelCase ,indent=__lowerCamelCase ,**__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ):
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def UpperCAmelCase_ ( ):
lowercase_ :Dict = git.Repo(search_parent_directories=__lowerCamelCase )
lowercase_ :List[str] = {
"repo_id": str(__lowerCamelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase_ ( __lowerCamelCase : Callable ,__lowerCamelCase : Iterable ):
return list(map(__lowerCamelCase ,__lowerCamelCase ) )
def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[str] ):
with open(__lowerCamelCase ,"wb" ) as f:
return pickle.dump(__lowerCamelCase ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : str ):
def remove_articles(__lowerCamelCase : Optional[int] ):
return re.sub(r"\b(a|an|the)\b" ," " ,__lowerCamelCase )
def white_space_fix(__lowerCamelCase : Dict ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase : Optional[Any] ):
lowercase_ :Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
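# Example (comment only): normalize_answer("The Quick, Brown Fox!") -> "quick brown fox"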
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[int] ):
lowercase_ :Tuple = normalize_answer(__lowerCamelCase ).split()
lowercase_ :Dict = normalize_answer(__lowerCamelCase ).split()
lowercase_ :Tuple = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
lowercase_ :Tuple = sum(common.values() )
if num_same == 0:
return 0
lowercase_ :Union[str, Any] = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ :List[Any] = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ :Tuple = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : Union[str, Any] ):
return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[str] ):
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
lowercase_ :Any = 0
for hypo, pred in zip(__lowerCamelCase ,__lowerCamelCase ):
em += exact_match_score(__lowerCamelCase ,__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
em /= len(__lowerCamelCase )
return {"em": em}
def UpperCAmelCase_ ( __lowerCamelCase : str ):
return model_prefix.startswith("rag" )
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
for p in extra_params:
if getattr(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
if not hasattr(__lowerCamelCase ,__lowerCamelCase ) and not hasattr(__lowerCamelCase ,equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(__lowerCamelCase ) )
delattr(__lowerCamelCase ,__lowerCamelCase )
continue
lowercase_ :List[Any] = p if hasattr(__lowerCamelCase ,__lowerCamelCase ) else equivalent_param[p]
setattr(__lowerCamelCase ,__lowerCamelCase ,getattr(__lowerCamelCase ,__lowerCamelCase ) )
delattr(__lowerCamelCase ,__lowerCamelCase )
return hparams, config
| 223 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = [1]
lowercase_ , lowercase_ , lowercase_ = 0, 0, 0
lowercase_ = ugly_nums[ia] * 2
lowercase_ = ugly_nums[ia] * 3
lowercase_ = ugly_nums[ia] * 5
for _ in range(1 , __lowerCAmelCase ):
lowercase_ = min(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ugly_nums.append(__lowerCAmelCase )
if next_num == next_a:
ia += 1
lowercase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
lowercase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
lowercase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
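# Trace (comment only): the merged sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12;
# whichever 2x/3x/5x candidate equals the chosen minimum has its pointer advanced,
# and ties advance several pointers at once (e.g. 6 = 2*3 = 3*2), avoiding duplicates.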
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 313 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[MinHash]:
'''simple docstring'''
if len(__lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase_ = MinHash(num_perm=__lowerCAmelCase )
for token in set(__lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(__lowerCAmelCase ) if len(t.strip() ) > 0}
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , *,
lowerCAmelCase_ : float = 0.85 , ):
"""simple docstring"""
lowercase_ = duplication_jaccard_threshold
lowercase_ = NUM_PERM
lowercase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
lowercase_ = defaultdict(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : MinHash):
"""simple docstring"""
lowercase_ = self._index.query(lowerCAmelCase_)
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''')
return
self._index.insert(lowerCAmelCase_ , lowerCAmelCase_)
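        # for/else below: attach this file to the first near-duplicate that already
        # anchors a cluster; if none does (no break), seed a new cluster keyed by
        # the first close duplicate.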
if len(lowerCAmelCase_) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase_)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowercase_ = [base] + list(lowerCAmelCase_)
# reformat the cluster to be a list of dict
lowercase_ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase_)
return duplicate_clusters
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.get_duplicate_clusters()
with open(lowerCAmelCase_ , """w""") as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = element
lowercase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__lowerCAmelCase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DuplicationIndex(duplication_jaccard_threshold=__lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__lowerCAmelCase ) ) , max_queue_size=1_00 ) ):
di.add(__lowerCAmelCase , __lowerCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
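# Example (comment only): "def foo(a)" vs "def foo(b)" tokenize to {def, foo, a}
# and {def, foo, b}; Jaccard = |{def, foo}| / |{def, foo, a, b}| = 2 / 4 = 0.5.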
UpperCAmelCase : Optional[Any] = None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for elementa in cluster:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(__lowerCAmelCase , __lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase_ = 1
extremes.append(__lowerCAmelCase )
return extremes
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
global _shared_dataset
lowercase_ = dataset
lowercase_ = []
lowercase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=__lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__lowerCAmelCase , __lowerCAmelCase , ) , total=len(__lowerCAmelCase ) , ):
extremes_list.append(__lowerCAmelCase )
return extremes_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowercase_ = make_duplicate_clusters(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
lowercase_ = {}
lowercase_ = find_extremes(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase_ = element
lowercase_ = duplicate_indices - set(extreme_dict.keys() )
lowercase_ = dataset.filter(lambda __lowerCAmelCase , __lowerCAmelCase : idx not in remove_indices , with_indices=__lowerCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
lowercase_ = extreme_dict[element["""base_index"""]]["""copies"""]
print(F'''Original dataset size: {len(__lowerCAmelCase )}''' )
print(F'''Number of duplicate clusters: {len(__lowerCAmelCase )}''' )
print(F'''Files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Unique files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Filtered dataset size: {len(__lowerCAmelCase )}''' )
return ds_filter, duplicate_clusters
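# Hypothetical usage sketch (the restored function name and the dataset id are
# assumptions, not taken from this module):
#   from datasets import load_dataset
#   ds = load_dataset("some-org/some-code-dataset", split="train")
#   ds_filtered, clusters = deduplicate_dataset(ds, 0.85)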
| 313 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Union[str, Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
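# Shape sketch (comment only): timm fuses q, k and v into one (3*hidden, hidden)
# weight and one (3*hidden,) bias; the three equal slices above become the
# separate query / key / value parameters.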
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
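# Example rename chain (comment only): "blocks.0.attn.proj.weight" first becomes
# "encoder.layer.0.attn.proj.weight", then "encoder.layer.0.attention.output.dense.weight".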
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = args.log_outputs
A : int = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
A : Dict = load_metric("""wer""" )
A : Dict = load_metric("""cer""" )
# compute metrics
A : Dict = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
A : int = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
# print & log results
A : Union[str, Any] = f'''WER: {wer_result}\nCER: {cer_result}'''
print(_lowerCAmelCase )
with open(f'''{dataset_id}_eval_results.txt''' , """w""" ) as f:
f.write(_lowerCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A : Tuple = f'''log_{dataset_id}_predictions.txt'''
A : Optional[int] = f'''log_{dataset_id}_targets.txt'''
with open(_lowerCAmelCase , """w""" ) as p, open(_lowerCAmelCase , """w""" ) as t:
# mapping function to write output
def write_to_file(_lowerCAmelCase , _lowerCAmelCase ):
p.write(f'''{i}''' + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f'''{i}''' + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(_lowerCAmelCase , with_indices=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : int = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A : Any = re.sub(_lowerCAmelCase , """""" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A : Optional[int] = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
A : Optional[int] = """ """.join(text.split(_lowerCAmelCase ) )
return text
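# Example (comment only), using the name this helper is called by below:
#   normalize_text("Hello,\n\nWORLD!") -> "hello world"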
def __UpperCamelCase ( _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
A : List[str] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_lowerCAmelCase )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
A : int = AutoFeatureExtractor.from_pretrained(args.model_id )
A : Union[str, Any] = feature_extractor.sampling_rate
# resample audio
A : Any = dataset.cast_column("""audio""" , Audio(sampling_rate=_lowerCAmelCase ) )
# load eval pipeline
if args.device is None:
A : Tuple = 0 if torch.cuda.is_available() else -1
A : List[Any] = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_lowerCAmelCase ):
A : List[str] = asr(
batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A : List[str] = prediction["""text"""]
A : Union[str, Any] = normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
A : List[Any] = dataset.map(_lowerCAmelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:str = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
SCREAMING_SNAKE_CASE_:List[str] = parser.parse_args()
main(args)
| 115 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=14, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=4, lowerCamelCase__=4, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=0.02, ):
A : List[str] = parent
A : Any = batch_size
A : Dict = seq_length
A : Tuple = is_training
A : Any = use_input_mask
A : Any = use_token_type_ids
A : Any = use_labels
A : Optional[int] = vocab_size
A : Dict = hidden_size
A : Dict = rotary_dim
A : Dict = num_hidden_layers
A : Tuple = num_attention_heads
A : Tuple = intermediate_size
A : Union[str, Any] = hidden_act
A : Dict = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : str = initializer_range
A : Any = None
A : Any = vocab_size - 1
A : int = vocab_size - 1
A : int = vocab_size - 1
def _lowerCAmelCase ( self ):
A : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Any = random_attention_mask([self.batch_size, self.seq_length] )
A : int = GPTJConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowerCamelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
return (config, input_ids, input_mask)
def _lowerCAmelCase ( self ):
A : List[str] = self.prepare_config_and_inputs()
A , A , A : List[str] = config_and_inputs
A : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Optional[int] = 20
A : Tuple = model_class_name(lowerCamelCase__ )
A : Dict = model.init_cache(input_ids.shape[0], lowerCamelCase__ )
A : int = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="""i4""" )
A : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
A : List[Any] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : List[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" )
A : Any = model(
input_ids[:, -1:], attention_mask=lowerCamelCase__, past_key_values=outputs_cache.past_key_values, position_ids=lowerCamelCase__, )
A : Any = model(lowerCamelCase__ )
A : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
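    # The method above runs the model twice -- incrementally through the KV cache
    # and once on the full sequence -- and requires the last-position outputs to
    # agree within 1e-3.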
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = 20
A : Any = model_class_name(lowerCamelCase__ )
A : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
A : str = model.init_cache(input_ids.shape[0], lowerCamelCase__ )
A : Any = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
A : Optional[int] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" )
A : List[Any] = model(
input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : Union[str, Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
@require_flax
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCamelCase : Optional[int] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _lowerCAmelCase ( self ):
A : List[Any] = FlaxGPTJModelTester(self )
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A , A , A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
@tooslow
def _lowerCAmelCase ( self ):
A : int = GPTaTokenizer.from_pretrained("""gpt2""", pad_token="""<|endoftext|>""", padding_side="""left""" )
A : Optional[int] = tokenizer(["""Hello this is a long string""", """Hey"""], return_tensors="""np""", padding=lowerCamelCase__, truncation=lowerCamelCase__ )
A : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
A : str = False
A : Optional[Any] = model.config.eos_token_id
A : Union[str, Any] = jax.jit(model.generate )
A : str = jit_generate(
inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], pad_token_id=tokenizer.pad_token_id ).sequences
A : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__, skip_special_tokens=lowerCamelCase__ )
A : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
@is_pt_flax_cross_test
def _lowerCAmelCase ( self ):
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A : Any = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ )
A : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
A : str = getattr(lowerCamelCase__, lowerCamelCase__ )
A , A : Optional[int] = pt_inputs["""input_ids"""].shape
A : List[str] = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
A : List[Any] = 0
A : Tuple = 1
A : Optional[int] = 0
A : str = 1
A : Dict = pt_model_class(lowerCamelCase__ ).eval()
A : int = model_class(lowerCamelCase__, dtype=jnp.floataa )
A : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase__ )
A : Dict = fx_state
with torch.no_grad():
A : Optional[int] = pt_model(**lowerCamelCase__ ).to_tuple()
A : str = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
A : Union[str, Any] = model_class.from_pretrained(lowerCamelCase__, from_pt=lowerCamelCase__ )
A : Any = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@is_pt_flax_cross_test
def _lowerCAmelCase ( self ):
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A : int = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
A : Dict = getattr(lowerCamelCase__, lowerCamelCase__ )
A : int = pt_model_class(lowerCamelCase__ ).eval()
A : int = model_class(lowerCamelCase__, dtype=jnp.floataa )
A : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase__, fx_model.params )
A , A : Optional[int] = pt_inputs["""input_ids"""].shape
A : Optional[int] = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
A : Tuple = 0
A : Tuple = 1
A : str = 0
A : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A : List[str] = pt_model(**lowerCamelCase__ ).to_tuple()
A : Optional[int] = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
A : str = pt_model_class.from_pretrained(lowerCamelCase__, from_flax=lowerCamelCase__ )
with torch.no_grad():
A : str = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@tooslow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
A : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
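# ---- added example (not part of the original test suite) ----
# A standalone sketch of the jit + left-padding generation pattern exercised
# above, on a small checkpoint ("gpt2" is assumed to ship Flax weights on the
# Hub, which it does); the prompts mirror the test case.
import jax
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True)

jit_generate = jax.jit(model.generate)
sequences = jit_generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    pad_token_id=tokenizer.pad_token_id,
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))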
| 115 | 1 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
snake_case_ = str(SCREAMING_SNAKE_CASE__ )
snake_case_ = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 99 ):
if not 0 < percent < 100:
        raise ValueError('''solution() only accepts values strictly between 0 and 100''' )
snake_case_ = 0
snake_case_ = 1
while True:
if check_bouncy(SCREAMING_SNAKE_CASE__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
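# ---- added example (not part of the original snippet) ----
# Self-contained sanity check: the assignments above are name-mangled, so
# check_bouncy is restated here with clean names. The sample numbers are the
# ones quoted in Project Euler 112 (134468 increasing, 66420 decreasing,
# 155349 bouncy).
def is_bouncy(n: int) -> bool:
    digits = str(n)
    ascending = "".join(sorted(digits))
    return digits != ascending and digits != ascending[::-1]

assert not is_bouncy(134468)  # non-decreasing digits
assert not is_bouncy(66420)   # non-increasing digits
assert is_bouncy(155349)      # goes up and down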
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""") | 8 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
lowerCAmelCase = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = None
# source code of `config_class`
lowercase__ = inspect.getsource(SCREAMING_SNAKE_CASE )
lowercase__ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowercase__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowercase__ = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
lowercase__ = ckpt_name
break
return checkpoint
def _a ( ):
"""simple docstring"""
lowercase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowercase__ = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE )
lowercase__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '''\n'''.join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
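# ---- added example (not part of the original script) ----
# Quick illustration of the checkpoint-link regex (module-level assignments in
# this file are name-mangled, so the pattern is recompiled here; the docstring
# text is hypothetical).
import re

_pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_sample = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for usage."
print(_pattern.findall(_sample))
# -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]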
| 110 | 0 |
"""simple docstring"""
def lowercase__ ( snake_case_ :int = 10**9 ):
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__UpperCAmelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 86 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( enum.Enum ):
a__ : str = 0
a__ : List[Any] = 1
a__ : str = 2
@add_end_docstrings(_lowerCAmelCase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Optional[Any] , *_lowercase : Any , **_lowercase : Optional[int] ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__UpperCAmelCase = None
if self.model.config.prefix is not None:
__UpperCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__UpperCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
__UpperCAmelCase = {**self._preprocess_params, **preprocess_params}
__UpperCAmelCase = {**self._forward_params, **forward_params}
def a ( self : Any , _lowercase : Optional[Any]=None , _lowercase : List[str]=None , _lowercase : int=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : List[Any]=None , **_lowercase : str , ):
__UpperCAmelCase = {}
if prefix is not None:
__UpperCAmelCase = prefix
if prefix:
__UpperCAmelCase = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
''' [None, \'hole\']''' )
__UpperCAmelCase = handle_long_generation
preprocess_params.update(_lowercase )
__UpperCAmelCase = generate_kwargs
__UpperCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.TENSORS
if return_type is not None:
__UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
__UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
__UpperCAmelCase = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a ( self : Optional[int] , *_lowercase : Optional[int] , **_lowercase : Any ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self : List[str] , _lowercase : str , **_lowercase : Optional[Any] ):
return super().__call__(_lowercase , **_lowercase )
def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : Dict="" , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ):
__UpperCAmelCase = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prompt_text
if handle_long_generation == "hole":
__UpperCAmelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__UpperCAmelCase = generate_kwargs['''max_new_tokens''']
else:
__UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__UpperCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
__UpperCAmelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__UpperCAmelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def a ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : Optional[int] ):
__UpperCAmelCase = model_inputs['''input_ids''']
__UpperCAmelCase = model_inputs.get('''attention_mask''' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = 1
else:
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__UpperCAmelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
__UpperCAmelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__UpperCAmelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__UpperCAmelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__UpperCAmelCase = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
__UpperCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
__UpperCAmelCase = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__UpperCAmelCase = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[int]=ReturnType.FULL_TEXT , _lowercase : List[str]=True ):
__UpperCAmelCase = model_outputs['''generated_sequence'''][0]
__UpperCAmelCase = model_outputs['''input_ids''']
__UpperCAmelCase = model_outputs['''prompt_text''']
__UpperCAmelCase = generated_sequence.numpy().tolist()
__UpperCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__UpperCAmelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__UpperCAmelCase = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
__UpperCAmelCase = prompt_text + text[prompt_length:]
else:
__UpperCAmelCase = text[prompt_length:]
__UpperCAmelCase = {'''generated_text''': all_text}
records.append(_lowercase )
return records
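# ---- added example (not part of the original module) ----
# Minimal usage sketch for this pipeline (any causal LM checkpoint works;
# "gpt2" is used here as a small example).
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Hello, I'm a language model,",
    max_new_tokens=20,
    return_full_text=False,  # ReturnType.NEW_TEXT: strip the prompt from the output
)
print(outputs[0]["generated_text"])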
| 86 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = (DEISMultistepScheduler,)
snake_case = (('''num_inference_steps''', 25),)
def lowerCAmelCase ( self : Any , **__UpperCAmelCase : Dict ):
'''simple docstring'''
_A = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**__UpperCAmelCase )
return config
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int=0 , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop("num_inference_steps" , __UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config(**__UpperCAmelCase )
_A = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
_A = scheduler_class.from_pretrained(__UpperCAmelCase )
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A , _A = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
_A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Tuple=0 , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop("num_inference_steps" , __UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
_A = scheduler_class.from_pretrained(__UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
_A = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
if scheduler is None:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**__UpperCAmelCase )
_A = scheduler_class(**__UpperCAmelCase )
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**__UpperCAmelCase )
_A = scheduler_class(**__UpperCAmelCase )
_A = 10
_A = self.dummy_model()
_A = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(__UpperCAmelCase , __UpperCAmelCase )
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
return sample
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop("num_inference_steps" , __UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps" ):
_A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
_A = dummy_past_residuals[: scheduler.config.solver_order]
_A = scheduler.timesteps[5]
_A = scheduler.timesteps[6]
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = DEISMultistepScheduler(**self.get_scheduler_config() )
_A = self.full_loop(scheduler=__UpperCAmelCase )
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
_A = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_A = DPMSolverMultistepScheduler.from_config(scheduler.config )
_A = UniPCMultistepScheduler.from_config(scheduler.config )
_A = DEISMultistepScheduler.from_config(scheduler.config )
_A = self.full_loop(scheduler=__UpperCAmelCase )
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , algorithm_type="deis" , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , algorithm_type=__UpperCAmelCase , )
_A = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , algorithm_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase ).any(), "Samples have nan numbers"
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.check_over_configs(lower_order_final=__UpperCAmelCase )
self.check_over_configs(lower_order_final=__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0 )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.full_loop()
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.full_loop(prediction_type="v_prediction" )
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0 )
_A = scheduler_class(**__UpperCAmelCase )
_A = 10
_A = self.dummy_model()
_A = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(__UpperCAmelCase , __UpperCAmelCase )
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
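# ---- added example (not part of the original test suite) ----
# Standalone sketch of one DEIS denoising loop on random tensors, mirroring
# full_loop above (random data stands in for a real UNet; shapes are arbitrary).
import torch
from diffusers import DEISMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a model prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])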
| 79 |
'''simple docstring'''
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
assert isinstance(__lowercase , __lowercase ), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
_A = F'''The input value of [n={number}] has to be > 0'''
raise ValueError(__lowercase )
else:
_A = sylvester(number - 1 )
_A = num - 1
_A = num
return lower * upper + 1
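# ---- added example (not part of the original snippet) ----
# The assignments above are name-mangled, so here is a clean restatement plus a
# sanity check against the known opening terms of Sylvester's sequence:
# 2, 3, 7, 43, 1807, with a(n) = a(n-1)^2 - a(n-1) + 1.
def sylvester_clean(n: int) -> int:
    if n == 1:
        return 2
    prev = sylvester_clean(n - 1)
    return (prev - 1) * prev + 1

assert [sylvester_clean(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]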
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 79 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '''▁'''
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_SCREAMING_SNAKE_CASE = {
'''google/pegasus-xsum''': 5_1_2,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = VOCAB_FILES_NAMES
a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : int = PegasusTokenizer
a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<pad>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="<unk>" ,_lowerCamelCase="<mask_2>" ,_lowerCamelCase="<mask_1>" ,_lowerCamelCase=None ,_lowerCamelCase=103 ,**_lowerCamelCase ,) -> Any:
'''simple docstring'''
__lowercase = offset
if additional_special_tokens is not None:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise TypeError(
f"additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"
f" {type(_lowerCamelCase )}" )
__lowercase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(_lowerCamelCase ) ,self.offset - 1 )
]
if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
__lowercase = additional_special_tokens_extended
else:
__lowercase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 ,self.offset )]
super().__init__(
_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,pad_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,unk_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,mask_token_sent=_lowerCamelCase ,offset=_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def _UpperCAmelCase (self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(_lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(_lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
_lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file ,_lowerCamelCase )
return (out_vocab_file,)
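# ---- added example (not part of the original module) ----
# Usage sketch (requires the `tokenizers` backend; the checkpoint name comes
# from the pretrained map above).
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
enc = tok("PEGASUS was pretrained with gap-sentence generation.")
print(enc["input_ids"][-1] == tok.eos_token_id)  # True: </s> is appended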
| 217 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "gpt_neox_japanese"
def __init__(self ,_lowerCamelCase=32000 ,_lowerCamelCase=2560 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase=4 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1.0_0 ,_lowerCamelCase=10000 ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-5 ,_lowerCamelCase=True ,_lowerCamelCase=31996 ,_lowerCamelCase=31999 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0 ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_multiple_size
__lowercase = hidden_act
__lowercase = rotary_pct
__lowercase = rotary_emb_base
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_cache
__lowercase = attention_dropout
__lowercase = hidden_dropout
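# ---- added example (not part of the original module) ----
# The class above is the name-mangled GPTNeoXJapaneseConfig; the released
# class is imported instead so this sketch runs as-is.
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
assert config.model_type == "gpt_neox_japanese"
print(config.to_dict()["hidden_size"])  # 256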
| 217 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : int = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCAmelCase__ : Optional[Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] ,dtype=tf.int32 ,) # "J'aime le camembert !"
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )["last_hidden_state"]
lowerCAmelCase__ : List[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,__UpperCAmelCase )
# compare the actual values for a slice.
lowerCAmelCase__ : Optional[Any] = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] ,dtype=tf.float32 ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
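# ---- added example (not part of the original test suite) ----
# The same forward pass with the PyTorch port, for comparison (checkpoint
# "camembert-base" is the standard PyTorch CamemBERT release).
import torch
from transformers import CamembertModel

pt_model = CamembertModel.from_pretrained("camembert-base")
input_ids = torch.tensor([[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]])  # "J'aime le camembert !"
with torch.no_grad():
    out = pt_model(input_ids).last_hidden_state
print(out.shape)  # torch.Size([1, 10, 768])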
| 37 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_snake_case : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_snake_case : int = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_snake_case : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
_snake_case : List[str] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_snake_case : Any = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_snake_case : Optional[Any] = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_snake_case : int = tf.keras.preprocessing.image.img_to_array(test_image)
_snake_case : Tuple = np.expand_dims(test_image, axis=0) / 255.0  # rescale to match the training generators
_snake_case : Any = classifier.predict(test_image)
# training_set.class_indices
# predict() returns a sigmoid probability, so threshold at 0.5 rather than
# comparing the float output to exactly 0 or 1
if result[0][0] < 0.5:
    _snake_case : Any = "Normal"
else:
    _snake_case : List[str] = "Abnormality detected"
| 123 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
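# ---- added example (not part of the original module) ----
# Usage sketch composing the pieces exported above (checkpoint names are the
# standard public releases; note this downloads several GB of weights).
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)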
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
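# ---- added example (not part of the original script) ----
# Illustration of the key rename on a dummy state dict (self-contained, since
# the module-level constants above are name-mangled).
import torch

d = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.wte.weight": torch.ones(2, 2)}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
print(sorted(d))  # ['lm_head.weight', 'transformer.wte.weight']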
| 15 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Dict=7 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Union[str, Any]=30 , _UpperCAmelCase : Union[str, Any]=4_00 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , _UpperCAmelCase : Any=[0.5, 0.5, 0.5] , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : int=1 / 2_55 , _UpperCAmelCase : int=True , ) -> List[str]:
"""simple docstring"""
__lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_pad
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=False ) -> str:
"""simple docstring"""
if not batched:
__lowercase = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__lowercase , __lowercase = image.size
else:
__lowercase , __lowercase = image.shape[1], image.shape[2]
if w < h:
__lowercase = int(self.size['shortest_edge'] * h / w )
__lowercase = self.size['shortest_edge']
elif w > h:
__lowercase = self.size['shortest_edge']
__lowercase = int(self.size['shortest_edge'] * w / h )
else:
__lowercase = self.size['shortest_edge']
__lowercase = self.size['shortest_edge']
else:
__lowercase = []
for image in image_inputs:
__lowercase , __lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowercase = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__lowercase = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : str = DetaImageProcessor if is_vision_available() else None
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = DetaImageProcessingTester(self )
@property
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_rescale' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowercase = json.loads(f.read() )
__lowercase = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowercase = DetaImageProcessor()
__lowercase = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__lowercase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase )
__lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__lowercase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase ) )
# verify boxes
__lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase )
__lowercase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__lowercase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase ) )
# verify is_crowd
__lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase ) )
# verify class_labels
__lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase ) )
# verify orig_size
__lowercase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase ) )
# verify size
__lowercase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase ) )
@slow
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowercase = json.loads(f.read() )
__lowercase = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowercase = DetaImageProcessor(format='coco_panoptic' )
__lowercase = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__lowercase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase )
__lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__lowercase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase ) )
# verify boxes
__lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase )
__lowercase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__lowercase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase ) )
# verify is_crowd
__lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase ) )
# verify class_labels
__lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase ) )
# verify masks
__lowercase = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase )
# verify orig_size
__lowercase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase ) )
# verify size
__lowercase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase ) )
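# ---- added example (not part of the original test suite) ----
# Standalone usage sketch for the processor under test (no annotations; a
# random uint8 image stands in for a real photo).
import torch
from transformers import DetaImageProcessor

processor = DetaImageProcessor()
image = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
enc = processor(images=image, return_tensors="pt")
print(enc["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) with default resizing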
| 325 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
__lowercase = factor * value
__lowercase = value
while not is_prime(SCREAMING_SNAKE_CASE ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
return value
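# ---- added example (not part of the original snippet) ----
# Clean, self-contained restatement of the two helpers above (the mangled
# assignments appear to bind `value` and `first_value_val`), with checks.
import math

def is_prime_clean(number: int) -> bool:
    assert isinstance(number, int) and number >= 0
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

def next_prime_clean(value: int, factor: int = 1, **kwargs) -> int:
    value *= factor
    first_value = value
    while not is_prime_clean(value):
        value += -1 if kwargs.get("desc") is True else 1
        if value == first_value:
            return next_prime_clean(value + 1, **kwargs)
    return value

assert next_prime_clean(14) == 17             # ascending search
assert next_prime_clean(14, desc=True) == 13  # descending search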
| 325 | 1 |
UpperCamelCase__ = "Tobias Carryer"
from time import time
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=int(time() ) ): # noqa: B008
UpperCamelCase__ = multiplier
UpperCamelCase__ = increment
UpperCamelCase__ = modulo
UpperCamelCase__ = seed
def _lowerCamelCase ( self ):
UpperCamelCase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
UpperCamelCase__ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
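# ---- added example (not part of the original snippet) ----
# The constants used above are the classic Numerical Recipes LCG parameters
# (a=1664525, c=1013904223, m=2<<31 == 2**32). A bounded, self-contained demo
# in place of the infinite loop:
def lcg_demo(seed: int, n: int, a: int = 1664525, c: int = 1013904223, m: int = 2 << 31) -> list:
    out = []
    for _ in range(n):
        seed = (a * seed + c) % m
        out.append(seed)
    return out

print(lcg_demo(42, 3))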
| 87 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
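# ---- added example (not part of the original module) ----
# End-user effect of the lazy module: SwiftFormer classes resolve on first
# attribute access, so a plain import works once torch is installed.
from transformers import SwiftFormerConfig

print(SwiftFormerConfig().model_type)  # "swiftformer"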
| 87 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__lowerCAmelCase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def __lowerCamelCase ( ) -> Optional[int]:
_a : List[Any] = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_a : Any = get_sagemaker_input()
else:
_a : Optional[Any] = get_cluster_input()
return config
def __lowerCamelCase ( lowerCAmelCase_=None ) -> Any:
if subparsers is not None:
_a : Tuple = subparsers.add_parser('config' , description=lowerCAmelCase_ )
else:
_a : List[Any] = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase_ )
parser.add_argument(
'--config_file' , default=lowerCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[str, Any]:
_a : Dict = get_user_input()
if args.config_file is not None:
_a : List[Any] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
_a : str = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(f"""accelerate configuration saved at {config_file}""" )
def __lowerCamelCase ( ) -> Any:
_a : Union[str, Any] = config_command_parser()
_a : List[Any] = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 89 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : str = ["""image_processor""", """tokenizer"""]
a_ : List[str] = """ViTImageProcessor"""
a_ : List[str] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[str] , a_ : str=None , a_ : Dict=None , **a_ : List[Any] ):
lowerCAmelCase_ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
def __call__( self : Union[str, Any] , a_ : Any=None , a_ : Dict=None , a_ : List[str]=None , a_ : str=None , **a_ : Any ):
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if visual_prompt is not None:
lowerCAmelCase_ : Optional[Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if images is not None:
lowerCAmelCase_ : List[str] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ : Union[str, Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ : Dict = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def lowerCamelCase ( self : Optional[int] , *a_ : Optional[Any] , **a_ : List[str] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Optional[Any] , *a_ : Tuple , **a_ : Tuple ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Dict ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
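# ---- added example (not part of the original module) ----
# Usage sketch with the released CLIPSeg processor this snippet corresponds to
# (the visual-prompt / conditional_pixel_values API is distinctive to CLIPSeg;
# the checkpoint name is assumed to be the public "CIDAS/clipseg-rd64-refined").
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))
enc = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(enc.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']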
| 241 | 0 |
def _A ( lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 100_0000 ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
for current_denominator in range(1 , limit + 1 ):
lowerCAmelCase__ = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
lowerCAmelCase__ = current_numerator
lowerCAmelCase__ = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
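# ---- added example (not part of the original snippet) ----
# Clean restatement (parameter names above are mangled) with the Project
# Euler 71 sanity case: among denominators up to 8, the fraction immediately
# to the left of 3/7 is 2/5.
def left_neighbour_numerator(num: int = 3, den: int = 7, limit: int = 10**6) -> int:
    best_n, best_d = 0, 1
    for d in range(1, limit + 1):
        n = d * num // den
        if d % den == 0:
            n -= 1  # d*num/den is exact here, and we need a strictly smaller fraction
        if n * best_d > d * best_n:
            best_n, best_d = n, d
    return best_n

assert left_neighbour_numerator(3, 7, 8) == 2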
| 221 |
from statistics import mean, stdev
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = min(lowerCAmelCase_ )
lowerCAmelCase__ = max(lowerCAmelCase_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , lowerCAmelCase_ ) for x in data]
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = mean(lowerCAmelCase_ )
lowerCAmelCase__ = stdev(lowerCAmelCase_ )
# standardize data
return [round((x - mu) / (sigma) , lowerCAmelCase_ ) for x in data]
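# ---- added example (not part of the original snippet) ----
# Quick demo of both rescalings on a tiny series (restated inline, since the
# assignments above are name-mangled; mean/stdev come from the import at top).
data = [2.0, 4.0, 6.0]
x_min, x_max = min(data), max(data)
print([round((x - x_min) / (x_max - x_min), 3) for x in data])  # [0.0, 0.5, 1.0]
mu, sigma = mean(data), stdev(data)
print([round((x - mu) / sigma, 3) for x in data])  # [-1.0, 0.0, 1.0]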
| 221 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav, max_length, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
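# --- Added illustration (not part of the original script) ---
# random_subsample crops a waveform to at most `max_length` seconds. For
# example, a 10 s clip at 16 kHz (160_000 samples) cropped to 5 s keeps
# exactly 80_000 samples:
#
#   wav = np.zeros(160_000, dtype=np.float32)
#   assert len(random_subsample(wav, max_length=5.0, sample_rate=16_000)) == 80_000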
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."} )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."} )
    train_split_name: str = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    eval_split_name: str = field(
        default="validation", metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    audio_column_name: str = field(
        default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_length_seconds: float = field(
        default=20, metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."}, )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."} )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
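# --- Added usage note (not part of the original script) ---
# An illustrative launch, mirroring the transformers audio-classification
# example; the dataset and model names below are common choices, not
# requirements of the script:
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-ks --do_train --do_eval \
#       --learning_rate 3e-5 --max_length_seconds 1 \
#       --per_device_train_batch_size 32 --num_train_epochs 5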
| 200 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it." )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
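# --- Added usage note (not part of the original file) ---
# Illustrative invocation of the subcommand defined above; the TPU name, zone
# and commands are placeholders:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" \
#       --command "accelerate launch train.py" \
#       --install_accelerate --debug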
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    """Return True if `number` uses each digit 1-9 exactly once."""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set('123456789')


def solution() -> int | None:
    """Largest 1-9 pandigital number that is a concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 312 | """simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
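# --- Added usage example (not part of the original file) ---
# Direct calls, in addition to the test suite above; the rolling hash lets
# each window update in O(1) instead of rehashing from scratch.
if __name__ == "__main__":
    assert rabin_karp("needle", "haystack with a needle in it")
    assert not rabin_karp("needle", "haystack without one")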
| 312 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_class, tokenizer_name, processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 163 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right | 163 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['c'])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'], None, stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['a', 'b'])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['a'])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'])

        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
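# --- Added usage example (not part of the original file) ---
# The helper under test, called directly: with both out_features and
# out_indices unset, it defaults to the last stage.
if __name__ == "__main__":
    feats, idxs = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
    print(feats, idxs)  # -> ['stage2'] [2]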
| 109 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R): given exactly one zero argument, solve for it."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
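# --- Added usage example (not part of the original file) ---
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=2, resistance=0))   # -> {'resistance': 5.0}
    print(ohms_law(voltage=0, current=1.5, resistance=4))  # -> {'voltage': 6.0}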
| 109 | 1 |
import math
def prime_sieve(n: int) -> list:
    """Return a list of the primes below n (sieve of Eratosthenes, odd numbers only)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    """Sum the numbers n <= limit that lie strictly between the squares of
    consecutive primes p < q and are divisible by exactly one of p and q."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
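# --- Added cross-check sketch (not part of the original file) ---
# Reading of the loops above: for consecutive primes p < q, the solution adds
# every n <= limit with p*p < n < q*q that is divisible by exactly one of p, q.
# A tiny brute force of that interpretation (kept separate from the solution):
if __name__ == "__main__":
    def brute(limit: int) -> int:
        primes = prime_sieve(100)
        total = 0
        for p, q in zip(primes, primes[1:]):
            if p * p > limit:
                break
            for n in range(p * p + 1, min(q * q, limit + 1)):
                if (n % p == 0) != (n % q == 0):
                    total += n
        return total

    print(brute(1000), solution(1000))  # the two should agree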
| 201 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase_ = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 ):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
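# --- Added usage note (not part of the original file) ---
# This script is meant to be run under accelerate's distributed launcher, e.g.:
#
#   accelerate launch --num_processes 2 test_metrics.py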
| 7 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case, __snake_case ) -> float:
"""simple docstring"""
_validate_point(__snake_case )
_validate_point(__snake_case )
if len(__snake_case ) != len(__snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(__snake_case, __snake_case ) ) )
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
if point:
if isinstance(__snake_case, __snake_case ):
for item in point:
if not isinstance(__snake_case, (int, float) ):
_UpperCamelCase = (
'''Expected a list of numbers as input, found '''
F'''{type(__snake_case ).__name__}'''
)
raise TypeError(__snake_case )
else:
_UpperCamelCase = F'''Expected a list of numbers as input, found {type(__snake_case ).__name__}'''
raise TypeError(__snake_case )
else:
raise ValueError('''Missing an input''' )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> float:
"""simple docstring"""
_validate_point(__snake_case )
_validate_point(__snake_case )
if len(__snake_case ) != len(__snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(__snake_case, __snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
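# --- Added usage example (not part of the original file) ---
if __name__ == "__main__":
    print(manhattan_distance([1, 1], [2, 2]))              # -> 2.0
    print(manhattan_distance_one_liner([1.5, 2], [3, 4]))  # -> 3.5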
| 358 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 100 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        """Uses the tokenizer and the image processor to prepare text and/or image inputs."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
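# --- Added usage sketch (not part of the original file) ---
# Assuming the standard FLAVA checkpoint on the Hub; the names below are
# illustrative only.
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a dog"], images=Image.open("dog.jpg"), return_tensors="pt")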
| 21 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = 'aab'
pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34 | 0 |
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
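# --- Added usage sketch (not part of the original file) ---
# A hedged example of how these backends are normally exercised through
# `Trainer.hyperparameter_search`; the search space below is an illustrative
# assumption, and `trainer` stands for an already-configured `Trainer`.
#
#   def optuna_hp_space(trial):
#       return {
#           "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
#           "per_device_train_batch_size": trial.suggest_categorical(
#               "per_device_train_batch_size", [16, 32]
#           ),
#       }
#
#   best_run = trainer.hyperparameter_search(
#       direction="minimize",
#       backend="optuna",          # any key of ALL_HYPERPARAMETER_SEARCH_BACKENDS
#       hp_space=optuna_hp_space,  # defaults to the backend's default_hp_space
#       n_trials=10,
#   )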
| 368 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item located at `self._loader_batch_index` within the current batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 158 | 0 |
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
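# --- Added note (not part of the original script) ---
# `fill_mask` returns a list of (filled_sentence, probability, predicted_token)
# tuples in descending probability order. For the prompt above, the top
# completion is typically something like
#   ("Le camembert est délicieux :)", 0.49, " délicieux")
# though exact tokens and scores depend on the checkpoint revision.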
| 275 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 0 |
"""
Project Euler Problem 8: find the thirteen adjacent digits in the 1000-digit
number below that have the greatest product.
"""
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 39 |
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
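# --- Added usage note (not part of the original file) ---
# These integration tests are gated behind the `@slow` and `@require_torch_gpu`
# decorators; in the diffusers repo they are typically run on a GPU machine
# with something like:
#
#   RUN_SLOW=1 python -m pytest tests/pipelines/stable_diffusion -k "k_diffusion"
#
# The exact test-file path above is an assumption; adjust it to wherever this
# module actually lives.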
| 39 | 1 |