| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()
    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
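# For reference, a minimal sketch of one record in biencoder-nq-dev.json,
# matching the only fields the parser reads ('question' and
# 'positive_ctxs'[*]['title']); the concrete values are made up for illustration:
#   {
#       "question": "who wrote the declaration of independence",
#       "positive_ctxs": [{"title": "Declaration of Independence", "text": "..."}],
#   }
# The script writes each question to the evaluation set and the tab-joined
# positive-context titles to the gold file, one record per line.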
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = LongformerTokenizer
test_slow_tokenizer = True
rust_tokenizer_class = LongformerTokenizerFast
test_rust_tokenizer = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
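# Quick illustration of the offset conventions pinned down above: a minimal
# sketch assuming the 'allenai/longformer-base-4096' checkpoint can be
# downloaded; the expected offsets mirror the trim_offsets=True case tested above.
def _offset_mapping_demo():
    from transformers import LongformerTokenizerFast

    tok = LongformerTokenizerFast.from_pretrained(
        'allenai/longformer-base-4096', add_prefix_space=True, trim_offsets=True
    )
    enc = tok('hello hello', return_offsets_mapping=True, add_special_tokens=False)
    # With trim_offsets=True the leading space is excluded from each token's span:
    print(enc.offset_mapping)  # expected per the tests above: [(0, 5), (6, 11)]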
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig ( PretrainedConfig ):
model_type = "nllb-moe"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=12_8112 , __SCREAMING_SNAKE_CASE : List[str]=1024 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=4096 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Dict=4096 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Dict=0.05 , __SCREAMING_SNAKE_CASE : int=0.05 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : int=1024 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]="float32" , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=128 , __SCREAMING_SNAKE_CASE : str=64 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : List[str]=0.001 , __SCREAMING_SNAKE_CASE : str=0.001 , __SCREAMING_SNAKE_CASE : str="all" , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.2 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Tuple=False , **__SCREAMING_SNAKE_CASE : Any , ) -> int:
a_ : List[Any] = vocab_size
a_ : List[str] = max_position_embeddings
a_ : Any = d_model
a_ : Optional[Any] = encoder_ffn_dim
a_ : int = encoder_layers
a_ : Optional[Any] = encoder_attention_heads
a_ : int = decoder_ffn_dim
a_ : Optional[int] = decoder_layers
a_ : Optional[Any] = decoder_attention_heads
a_ : Optional[Any] = dropout
a_ : List[Any] = attention_dropout
a_ : int = activation_dropout
a_ : str = activation_function
a_ : List[Any] = init_std
a_ : Optional[int] = encoder_layerdrop
a_ : Optional[int] = decoder_layerdrop
a_ : Any = use_cache
a_ : Any = encoder_layers
a_ : int = scale_embedding # scale factor will be sqrt(d_model) if True
a_ : Optional[Any] = router_z_loss_coef
a_ : Tuple = router_aux_loss_coef
a_ : List[str] = decoder_sparse_step
a_ : List[Any] = encoder_sparse_step
a_ : str = num_experts
a_ : Union[str, Any] = expert_capacity
a_ : Optional[int] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
a_ : List[Any] = router_dtype
a_ : int = router_ignore_padding_tokens
a_ : str = batch_prioritized_routing
a_ : int = second_expert_policy
a_ : List[Any] = normalize_router_prob_before_dropping
a_ : List[str] = moe_eval_capacity_token_fraction
a_ : Any = moe_token_dropout
a_ : List[Any] = output_router_logits
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
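# Minimal usage sketch for the config defined above. `NllbMoeConfig` is the
# public name this class maps to in transformers (an assumption here); the
# printed values mirror the defaults in __init__.
def _nllb_moe_config_demo():
    from transformers import NllbMoeConfig

    config = NllbMoeConfig()
    print(config.model_type)    # 'nllb-moe'
    print(config.num_experts)   # 128 (default above)
    print(config.router_dtype)  # 'float32'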
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class BarthezTokenizer ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
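# For reference, the special-token layouts built by the
# build-inputs-with-special-tokens method above, using the fairseq ids
# hard-coded in __init__ (<s>=0, <pad>=1, </s>=2, <unk>=3); the content ids
# (42, 43, 44) are illustrative:
#   single sequence:    <s> A </s>            -> [0, 42, 43, 2]
#   pair of sequences:  <s> A </s></s> B </s> -> [0, 42, 43, 2, 2, 44, 2]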
| 666 | 1 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f'n must be greater than or equal to 0. Got n = {n}')
    if n > len(prices):
        raise ValueError(
            'Each integral piece of rod must have a corresponding price. '
            f'Got n = {n} but length of prices = {len(prices)}'
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
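# Worked example of the recurrence (hand-computed) with the price table above:
#   max_rev[4] = max(6 + max_rev[3], 10 + max_rev[2], 12 + max_rev[1], 15 + max_rev[0])
#              = max(6 + 18, 10 + 12, 12 + 6, 15 + 0) = 24
# so bottom_up_cut_rod(4, [6, 10, 12, 15, 20, 23]) returns 24.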
| 666 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'
        return ''.join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
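# Worked example (hand-computed): for 'hello' vs 'world' only 'l' matches
# within the search window, so match_count = 1 and transpositions = 0, giving
# jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.467; with no common prefix the
# Jaro-Winkler score stays ≈ 0.467, which is what the print above reports.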
| 666 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
a_ : List[Any] = GenerationConfig(
do_sample=__SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
a_ : str = GenerationConfig.from_pretrained(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
a_ : str = AutoConfig.from_pretrained('''gpt2''' )
a_ : str = GenerationConfig.from_model_config(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : Tuple = GenerationConfig()
a_ : List[Any] = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
a_ : Union[str, Any] = copy.deepcopy(__SCREAMING_SNAKE_CASE )
a_ : Any = generation_config.update(**__SCREAMING_SNAKE_CASE )
# update_kwargs was not modified (no side effects)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__SCREAMING_SNAKE_CASE , {'''foo''': '''bar'''} )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : Optional[int] = GenerationConfig()
a_ : List[str] = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = GenerationConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
a_ : Union[str, Any] = GenerationConfig.from_model_config(__SCREAMING_SNAKE_CASE )
assert not hasattr(__SCREAMING_SNAKE_CASE , '''foo''' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __SCREAMING_SNAKE_CASE )
self.assertEqual(default_config.num_beams , 1 )
a_ : Optional[Any] = GenerationConfig(
do_sample=__SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __SCREAMING_SNAKE_CASE )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = GenerationConfig.from_pretrained(__SCREAMING_SNAKE_CASE , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Union[str, Any]:
a_ : List[str] = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
a_ : List[Any] = GenerationConfig(
do_sample=__SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
a_ : int = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''test-generation-config''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
a_ : List[Any] = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
a_ : Optional[int] = GenerationConfig(
do_sample=__SCREAMING_SNAKE_CASE , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
a_ : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
a_ : int = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
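# Minimal sketch of the save/load round trip exercised above; the parameter
# values mirror the ones used in these tests.
def _generation_config_roundtrip_demo():
    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.temperature == 0.7  # specified value survives the round trip
    assert loaded.top_k == 50  # unspecified values fall back to defaults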
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super().__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
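# Minimal usage sketch (inferred from the forward pass above; the class and
# variable names here are illustrative, not part of this file):
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)
# `W_supports` must carry 'sizes', 'start_token_id' and 'end_token_id' keys
# alongside the usual tokenizer outputs; the returned tensors hold per-token
# probabilities for entity start and end positions in the query.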
| 666 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Any = 0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Optional[int] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
a_ : str = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
a_ : Optional[Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : str = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
a_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
a_ : List[str] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(type(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , __SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
a_ : str = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
a_ : List[Any] = AutoConfig.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a_ : Optional[int] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
a_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
class NewModelConfigLocal ( BertConfig ):
model_type = "new-model"
try:
AutoConfig.register('''new-model''' , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
a_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
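# Minimal sketch of the registration flow exercised above ('my-model' and
# MyConfig are hypothetical names):
def _register_demo():
    from transformers import PretrainedConfig

    class MyConfig(PretrainedConfig):
        model_type = 'my-model'

    AutoConfig.register('my-model', MyConfig)
    # AutoConfig now resolves any config.json declaring
    # "model_type": "my-model" to MyConfig.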
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification ( TaskTemplate ):
task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
input_schema: ClassVar[Features] = Features({"image": Image()} )
label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
image_column: str = "image"
label_column: str = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
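# Minimal sketch of aligning this template with a dataset's features,
# assuming the method above maps to datasets' `align_with_features`
# (the label names below are illustrative):
def _align_demo(task_template):
    features = Features({'image': Image(), 'labels': ClassLabel(names=['cat', 'dog'])})
    # Copies the dataset's concrete label feature into the template's label_schema.
    return task_template.align_with_features(features)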
| 666 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
class IFSafetyChecker ( PreTrainedModel ):
config_class = CLIPConfig
_no_split_modules = ["CLIPEncoderLayer"]
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : CLIPConfig ) -> Dict:
super().__init__(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = CLIPVisionModelWithProjection(config.vision_config )
a_ : List[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
a_ : Any = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any=0.5 , __SCREAMING_SNAKE_CASE : List[str]=0.5 ) -> List[str]:
a_ : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[0]
a_ : Optional[int] = self.p_head(__SCREAMING_SNAKE_CASE )
a_ : str = nsfw_detected.flatten()
a_ : Dict = nsfw_detected > p_threshold
a_ : str = nsfw_detected.tolist()
if any(__SCREAMING_SNAKE_CASE ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(__SCREAMING_SNAKE_CASE ):
if nsfw_detected_:
a_ : Dict = np.zeros(images[idx].shape )
a_ : List[str] = self.w_head(__SCREAMING_SNAKE_CASE )
a_ : Dict = watermark_detected.flatten()
a_ : Union[str, Any] = watermark_detected > w_threshold
a_ : List[str] = watermark_detected.tolist()
if any(__SCREAMING_SNAKE_CASE ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(__SCREAMING_SNAKE_CASE ):
if watermark_detected_:
a_ : Optional[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
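# Minimal invocation sketch (argument names inferred from the forward pass
# above; the `checker` instance is illustrative):
#   images, nsfw_detected, watermark_detected = checker(
#       clip_input, images, p_threshold=0.5, w_threshold=0.5
#   )
# Images flagged for NSFW or watermark content come back zeroed out; the two
# boolean lists record which check fired for each image.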
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int):
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    raise ValueError(f'Open Knight Tour cannot be performed on a board of size {n}')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
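# Quick usage notes (hand-verified): open_knight_tour(1) returns [[1]], since a
# 1x1 board is trivially complete; a 5x5 board admits an open tour, so
# open_knight_tour(5) returns a board numbered 1..25; open_knight_tour(3)
# raises ValueError because the centre square is unreachable by knight moves.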
| 666 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def _UpperCAmelCase ( __A : str , __A : str ):
a_ : Dict = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
a_ : Tuple = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
a_ : Optional[Any] = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=__A , output_all_encodings=__A , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __A ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
a_ : Optional[Any] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
a_ : Dict = os.path.join(get_home_dir() , '''models''' )
a_ : List[Any] = _load_vocab(__A , __A , __A , cls=__A )
a_ : str = nlp.model.BERTModel(
__A , len(__A ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__A , use_token_type_embed=__A , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__A , use_decoder=__A , )
original_bort.load_parameters(__A , cast_dtype=__A , ignore_extra=__A )
a_ : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
a_ : List[Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(__A ),
}
a_ : List[str] = BertConfig.from_dict(__A )
a_ : Dict = BertForMaskedLM(__A )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__A : List[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__A : str , __A : Tuple ):
a_ : Tuple = hf_param.shape
a_ : int = to_torch(params[gluon_param] )
a_ : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
a_ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
a_ : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
a_ : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
a_ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
a_ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(__A )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(__A )
    hf_bort_model = BertModel.from_pretrained(__A )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(__A , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
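# The forward-compatible import this warning points users to (sketch):
#   from accelerate import find_executable_batch_size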
| 666 | 1 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
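# For reference, a minimal sketch of the shard-distribution behaviour these
# tests pin down. `distribute_shards_sketch` is a hypothetical illustration,
# not the actual `datasets` implementation, which may differ internally:
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    if num_shards == 0:
        return []
    # Never use more jobs than there are shards.
    num_jobs = min(num_shards, max_num_jobs)
    # Spread shards as evenly as possible; the first `num_shards % num_jobs`
    # jobs each take one extra shard.
    shards_per_job = [num_shards // num_jobs + (1 if i < num_shards % num_jobs else 0) for i in range(num_jobs)]
    starts = [sum(shards_per_job[:i]) for i in range(num_jobs)]
    return [range(start, start + count) for start, count in zip(starts, shards_per_job)]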
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str , params: dict ) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
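# The third anchor in the "gs_fl" footer is typically the "Cited by N" link,
# so this returns a string like "Cited by 123". This is an assumption about
# Google Scholar's current markup, which can change without notice.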
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
import math
def sieve(n: int ) -> list:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
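# Quick sanity check (illustrative): the segmented sieve agrees with the
# classic sieve on small inputs, e.g.
#   sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]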
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(f'{key} was not correctly initialized!' )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str , text: str ) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str ) -> list:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
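    # Worked example: for pattern "aabaabaaa", failure[k] is the length of the
    # longest proper prefix of pattern[: k + 1] that is also its suffix, which
    # is why kmp() can resume at failure[j - 1] instead of restarting from 0.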
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def SCREAMING_SNAKE_CASE ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return model
@property
    def dummy_unet_condition(self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
        return model
@property
    def dummy_vqvae_and_unet(self ):
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        unet = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowerCAmelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('''sagemaker_mpi_enabled''' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''' ) is not None
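# Illustrative values of the environment variables this helper inspects
# (hypothetical, for documentation only):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'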
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , __SCREAMING_SNAKE_CASE , )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> "torch.device":
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
        if self.no_cuda:
            device = torch.device('''cpu''' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('''cuda''' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
            device = torch.device('''cuda''' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
            device = torch.device('''cuda''' , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return False
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def get_deta_config(model_name ):
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 3_66
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'Model name {model_name} not supported' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer.decoder''' , '''model.decoder''' )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict['''model.''' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer''' , '''model''' )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print('''Logits:''' , outputs.logits[0, :3, :3] )
    print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
    print('''Everything ok!''' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print('''Pushing model and processor to hub...''' )
        model.push_to_hub(f'jozhang97/{model_name}' )
        processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
    def SCREAMING_SNAKE_CASE ( self , device , seed=0 ) -> List[Any]:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def SCREAMING_SNAKE_CASE ( self , device , seed=0 ) -> str:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version(operation: str , version: str ):
    return compare_versions(torch_version , operation , version )
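# Example usage (illustrative; results depend on the installed packages):
#   compare_versions('''numpy''', '''>=''', '''1.20.0''')
#   is_torch_version('''<''', '''2.0.0''')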
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
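# Hedged worked example for the flatten step above (language codes and texts are
# illustrative): {"en": "the cat", "fr": ["le chat", "la chatte"]} is split into
# per-language tuples, sorted by (language, text), and zipped back apart, giving
# {"language": ("en", "fr", "fr"),
#  "translation": ("the cat", "la chatte", "le chat")}.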
| 666 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__lowerCAmelCase = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _UpperCAmelCase ( __A : List[Any] , __A : int , __A : List[Any] , __A : Dict , __A : List[str] ):
for attribute in key.split('''.''' ):
a_ : Optional[int] = getattr(__A , __A )
if weight_type is not None:
a_ : str = getattr(__A , __A ).shape
else:
a_ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
a_ : str = value
elif weight_type == "weight_g":
a_ : Tuple = value
elif weight_type == "weight_v":
a_ : str = value
elif weight_type == "bias":
a_ : List[Any] = value
else:
a_ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : Any = []
a_ : Dict = fairseq_model.state_dict()
a_ : Dict = hf_model.feature_extractor
for name, value in fairseq_dict.items():
a_ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
a_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a_ : Optional[int] = True
if "*" in mapped_key:
a_ : Optional[int] = name.split(__A )[0].split('''.''' )[-2]
a_ : Tuple = mapped_key.replace('''*''' , __A )
if "weight_g" in name:
a_ : List[str] = '''weight_g'''
elif "weight_v" in name:
a_ : List[Any] = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
a_ : Optional[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ : Any = '''weight'''
else:
a_ : List[str] = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f'Unused weights: {unused_weights}' )
def _UpperCAmelCase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Any , __A : Optional[int] , __A : Union[str, Any] ):
a_ : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
a_ : Union[str, Any] = name.split('''.''' )
a_ : Optional[int] = int(items[0] )
a_ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
a_ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
a_ : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
a_ : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
a_ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__A )
@torch.no_grad()
def _UpperCAmelCase ( __A : Optional[int] , __A : Union[str, Any] , __A : int=None ):
# load the pre-trained checkpoints
a_ : Dict = torch.load(__A )
a_ : List[str] = WavLMConfigOrig(checkpoint['''cfg'''] )
a_ : Any = WavLMOrig(__A )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
a_ : Optional[int] = WavLMConfig.from_pretrained(__A )
else:
a_ : List[Any] = WavLMConfig()
a_ : str = WavLMModel(__A )
recursively_load_weights(__A , __A )
hf_wavlm.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__lowerCAmelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
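    # Hedged invocation sketch (the script filename and paths are assumptions):
    # python convert_wavlm_original_checkpoint.py \
    #     --checkpoint_path /path/to/WavLM-Base.pt \
    #     --pytorch_dump_folder_path ./wavlm-base-hf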
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
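        # Hedged arithmetic behind the expectations above: with d = retrieval_vector_size,
        # the all-ones query scores d against doc 0 (ones) and 2d against doc 1 (2 * ones),
        # so doc 1 wins; the all-minus-ones query scores -d vs. -2d, so doc 0 wins.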
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
            len(__SCREAMING_SNAKE_CASE ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
| 666 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
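# Hedged shape check for the dummy past_key_values above, using the config
# defaults (hidden_size=64, n_head=8, hence head_dim=8) with batch=2 and seqlen=5:
# past_key_values_length = 5 + 2 = 7 and batch * n_head = 16, so each layer gets a
# key tensor of shape (16, 8, 7) and a value tensor of shape (16, 7, 8).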
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
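# Hedged worked example: for radii 6 and 3 with height 4, the slant height is
# sqrt(4**2 + (6 - 3)**2) = 5, so the area is pi * (5 * (6 + 3) + 6**2 + 3**2) = 90 * pi.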
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__A , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
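# Hedged worked example: a 3-4-5 right triangle has semi-perimeter 6, so Heron's
# formula yields sqrt(6 * 3 * 2 * 1) = 6, matching base * height / 2 = 3 * 4 / 2.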
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : str , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : int ) -> None:
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
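# Hedged usage sketch: in the un-obfuscated `datasets` API this template is
# ImageClassification, and aligning it copies the dataset's ClassLabel into the
# label schema (column names below are illustrative):
#
# from datasets import ClassLabel, Features, Image
# from datasets.tasks import ImageClassification
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)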
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
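# Hedged usage note: _LazyModule defers the torch-dependent imports declared in
# _import_structure until an attribute is first touched, so importing the package
# stays cheap and the first access of e.g. GitModel is what pulls in modeling_git.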
| 666 | 1 |
'''simple docstring'''
import math
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int]=0 ) -> int: # a graph with Node 0,1,...,N-1
a_ : Union[str, Any] = n
a_ : List[Any] = [
[math.inf for j in range(0 , __SCREAMING_SNAKE_CASE )] for i in range(0 , __SCREAMING_SNAKE_CASE )
] # adjacency matrix for weight
a_ : List[str] = [
[math.inf for j in range(0 , __SCREAMING_SNAKE_CASE )] for i in range(0 , __SCREAMING_SNAKE_CASE )
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
a_ : str = w
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
a_ : str = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
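    # Hedged check: show_min() returns dp[u][v] rather than printing it; with the
    # edges above, the expected values are dp[1][4] == 11 (via 1 -> 3 -> 4) and
    # dp[0][3] == 16 (via 0 -> 2 -> 3).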
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
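    # Hedged arithmetic check: 100 int64 rows * 8 bytes = 800 bytes in total, and
    # 800 / 16 bytes per shard = 50 partitions, matching the assertion above.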
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__lowerCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__lowerCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _UpperCAmelCase ( __A : Any , __A : str ):
return float((preds == labels).mean() )
def _UpperCAmelCase ( __A : List[Any] , __A : Optional[Any] , __A : str="binary" ):
a_ : Dict = simple_accuracy(__A , __A )
a_ : Tuple = float(fa_score(y_true=__A , y_pred=__A , average=__A ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Dict ):
a_ : Dict = {}
for id_pred, label in zip(__A , __A ):
a_ : int = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
a_ : int = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
a_ : List[Any] = [(pred, label)]
a_ , a_ : Dict = [], []
for question, preds_labels in question_map.items():
a_ , a_ : List[Any] = zip(*__A )
a_ : List[str] = fa_score(y_true=__A , y_pred=__A , average='''macro''' )
fas.append(__A )
a_ : Any = int(sum(pred == label for pred, label in preds_labels ) == len(__A ) )
ems.append(__A )
a_ : str = float(sum(__A ) / len(__A ) )
a_ : int = sum(__A ) / len(__A )
a_ : Dict = float(fa_score(y_true=__A , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ) -> str:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , fa_avg='''macro''' )
elif self.config_name == "record":
a_ : Optional[int] = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
a_ : List[Any] = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
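# De-obfuscated, the two classes above are transformers' BloomConfig and its
# ONNX export config. A hypothetical usage sketch, assuming the standard
# transformers API (not part of this file):
from transformers import BloomConfig

config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
# attribute_map routes the generic names onto BLOOM's own field names:
print(config.num_hidden_layers, config.num_attention_heads)  # 2 8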
| 666 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["pixel_values"]
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> None:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : str = size if size is not None else {'''shortest_edge''': 224}
a_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
a_ : int = do_resize
a_ : int = size
a_ : Union[str, Any] = resample
a_ : Any = do_center_crop
a_ : Optional[Any] = crop_size
a_ : Any = do_rescale
a_ : Optional[Any] = rescale_factor
a_ : Dict = do_normalize
a_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
a_ : int = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> np.ndarray:
a_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
a_ : Union[str, Any] = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> np.ndarray:
a_ : int = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : str , ) -> np.ndarray:
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Any , ) -> PIL.Image.Image:
a_ : Tuple = do_resize if do_resize is not None else self.do_resize
a_ : str = size if size is not None else self.size
a_ : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''size''' , default_to_square=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = resample if resample is not None else self.resample
a_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ : Tuple = crop_size if crop_size is not None else self.crop_size
a_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' , default_to_square=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
a_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : str = do_normalize if do_normalize is not None else self.do_normalize
a_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
a_ : Any = image_std if image_std is not None else self.image_std
a_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ : List[str] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ : int = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
a_ : List[str] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
a_ : Union[str, Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
a_ : Dict = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
a_ : Any = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
a_ : int = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
a_ : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
a_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
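# The class above mirrors transformers' CLIPImageProcessor (resize to the
# shortest edge, center-crop, rescale to [0, 1], normalize with the OpenAI
# CLIP statistics). A hypothetical usage sketch assuming that API:
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = Image.new("RGB", (320, 256))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)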
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _UpperCAmelCase ( __A : Union[str, Any] , __A : List[Any]=0.999 , __A : Optional[Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
a_ : Any = []
for i in range(__A ):
a_ : List[Any] = i / num_diffusion_timesteps
a_ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ) , __A ) )
return torch.tensor(__A , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
snake_case__ = [e.name for e in KarrasDiffusionSchedulers]
snake_case__ = 2
@register_to_config
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : int = 1000 , __SCREAMING_SNAKE_CASE : float = 0.0_0085 , __SCREAMING_SNAKE_CASE : float = 0.012 , __SCREAMING_SNAKE_CASE : str = "linear" , __SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , __SCREAMING_SNAKE_CASE : str = "epsilon" , __SCREAMING_SNAKE_CASE : str = "linspace" , __SCREAMING_SNAKE_CASE : int = 0 , ) -> Optional[Any]:
if trained_betas is not None:
a_ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "linear":
a_ : Optional[int] = torch.linspace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a_ : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __SCREAMING_SNAKE_CASE , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a_ : Any = betas_for_alpha_bar(__SCREAMING_SNAKE_CASE )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
a_ : str = 1.0 - self.betas
a_ : Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=None ) -> str:
if schedule_timesteps is None:
a_ : Any = self.timesteps
a_ : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
a_ : List[str] = 1 if len(__SCREAMING_SNAKE_CASE ) > 1 else 0
else:
a_ : List[str] = timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
a_ : Union[str, Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
a_ : Tuple = self.index_for_timestep(__SCREAMING_SNAKE_CASE )
if self.state_in_first_order:
a_ : int = self.sigmas[step_index]
else:
a_ : str = self.sigmas_interpol[step_index]
a_ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> str:
a_ : List[Any] = num_inference_steps
a_ : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a_ : Optional[int] = np.linspace(0 , num_train_timesteps - 1 , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )[::-1].copy()
elif self.config.timestep_spacing == "leading":
a_ : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ : int = (np.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(__SCREAMING_SNAKE_CASE )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a_ : int = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ : Optional[int] = (np.arange(__SCREAMING_SNAKE_CASE , 0 , -step_ratio )).round().copy().astype(__SCREAMING_SNAKE_CASE )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
a_ : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
a_ : Optional[Any] = torch.from_numpy(np.log(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Dict = np.interp(__SCREAMING_SNAKE_CASE , np.arange(0 , len(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
a_ : List[Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE )
# interpolate sigmas
a_ : List[Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
a_ : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
a_ : Dict = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
# mps does not support float64
a_ : List[str] = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
else:
a_ : str = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
# interpolate timesteps
a_ : int = self.sigma_to_t(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=timesteps.dtype )
a_ : Optional[int] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
a_ : Tuple = torch.cat([timesteps[:1], interleaved_timesteps] )
a_ : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a_ : int = defaultdict(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str ) -> Dict:
# get log sigma
a_ : Optional[Any] = sigma.log()
# get distribution
a_ : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
a_ : Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
a_ : List[str] = low_idx + 1
a_ : Tuple = self.log_sigmas[low_idx]
a_ : str = self.log_sigmas[high_idx]
# interpolate sigmas
a_ : int = (low - log_sigma) / (low - high)
a_ : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
a_ : int = (1 - w) * low_idx + w * high_idx
a_ : str = t.view(sigma.shape )
return t
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
return self.sample is None
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[SchedulerOutput, Tuple]:
a_ : Dict = self.index_for_timestep(__SCREAMING_SNAKE_CASE )
# advance index counter by 1
a_ : int = timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a_ : List[Any] = self.sigmas[step_index]
a_ : Optional[Any] = self.sigmas_interpol[step_index + 1]
a_ : Optional[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
a_ : List[Any] = self.sigmas[step_index - 1]
a_ : List[str] = self.sigmas_interpol[step_index]
a_ : Tuple = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a_ : Union[str, Any] = 0
a_ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a_ : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
a_ : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a_ : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
a_ : Dict = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a_ : Tuple = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a_ : Union[str, Any] = sigma_interpol - sigma_hat
# store for 2nd order step
a_ : Dict = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
a_ : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
a_ : int = sigma_next - sigma_hat
a_ : List[str] = self.sample
a_ : str = None
a_ : Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
a_ : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__SCREAMING_SNAKE_CASE ):
# mps does not support float64
a_ : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
a_ : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
a_ : Any = self.timesteps.to(original_samples.device )
a_ : Union[str, Any] = timesteps.to(original_samples.device )
a_ : Any = [self.index_for_timestep(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for t in timesteps]
a_ : Dict = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
a_ : Union[str, Any] = sigma.unsqueeze(-1 )
a_ : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self : str ) -> Optional[int]:
return self.config.num_train_timesteps
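# Standalone sketch of the cosine beta schedule computed by the helper at the
# top of this snippet (betas_for_alpha_bar in diffusers), assuming the usual
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 definition:
import math
import torch

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)

print(cosine_betas(4))  # small betas, increasing monotonically toward max_beta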
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
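# The function above cannot be called as printed (the recursive call targets
# the original name "peak"), so here is a readable standalone version of the
# same divide-and-conquer search. It assumes a "mountain" input of length >= 3
# that strictly increases and then strictly decreases.
def find_peak(lst: list[int]) -> int:
    m = len(lst) // 2
    three = lst[m - 1 : m + 2]
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]            # middle element is the peak
    if three[0] < three[2]:        # still increasing: recurse on the right
        return find_peak(lst[m - 1 :] if len(lst[:m]) == 2 else lst[m:])
    return find_peak(lst[: m + 1] if len(lst[:m]) == 2 else lst[:m])

print(find_peak([1, 2, 3, 4, 5, 4, 3, 2, 1]))  # 5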
| 666 | 1 |
'''simple docstring'''
from math import isqrt
def _UpperCAmelCase ( __A : int ):
a_ : Dict = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __A , __A ):
a_ : str = False
return [i for i in range(2 , __A ) if is_prime[i]]
def _UpperCAmelCase ( __A : int = 10**8 ):
a_ : Tuple = calculate_prime_numbers(max_number // 2 )
a_ : Union[str, Any] = 0
a_ : Any = 0
a_ : Optional[Any] = len(__A ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        # Test which aims to verify that the offsets are well adapted to the arguments
        # `add_prefix_space` and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
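# A hypothetical quick check of the trim_offsets behaviour the tests above
# assert, assuming network access to the allenai/longformer-base-4096 weights:
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # expected [(0, 5), (6, 11)]: offsets exclude spaces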
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
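# Minimal sketch of the lazy-module idea used above (an illustration, not the
# real transformers._LazyModule): attribute access triggers the submodule
# import, so importing the package stays cheap until a class is actually used.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported class name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        # Only called when normal lookup fails: import the submodule on demand.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)

lazy = TinyLazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy.JSONDecoder)  # <class 'json.decoder.JSONDecoder'>, imported lazily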
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
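# De-obfuscated, the class above is transformers' BarthezTokenizer. A
# hypothetical usage sketch, assuming network access to the checkpoint:
from transformers import BarthezTokenizer

tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tok("Bonjour le monde").input_ids
print(tok.convert_ids_to_tokens(ids))  # e.g. ['<s>', '▁Bonjour', '▁le', '▁monde', '</s>']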
| 666 | 1 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__lowerCAmelCase = namedtuple('covid_data', 'cases deaths recovered')
def _UpperCAmelCase ( __A : str = "https://www.worldometers.info/coronavirus/" ):
a_ : Union[str, Any] = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(__A ).content ).xpath(__A ) )
__lowerCAmelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 666 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str , __A : str ):
def get_matched_characters(__A : str , __A : str ) -> str:
a_ : Union[str, Any] = []
a_ : int = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a_ : Any = int(max(0 , i - limit ) )
a_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
a_ : Any = f'{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}'
return "".join(__A )
# matching characters
a_ : Optional[Any] = get_matched_characters(__A , __A )
a_ : int = get_matched_characters(__A , __A )
a_ : Any = len(__A )
# transposition
a_ : List[Any] = (
len([(ca, ca) for ca, ca in zip(__A , __A ) if ca != ca] ) // 2
)
if not match_count:
a_ : Dict = 0.0
else:
a_ : Optional[int] = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a_ : List[str] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
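# The __main__ block above calls jaro_winkler, but the definition was renamed
# during obfuscation, so the module is not runnable as printed. A readable
# standalone version of the same algorithm, with a worked value:
def jaro_winkler(stra: str, strb: str) -> float:
    def matched_chars(a: str, b: str) -> str:
        matched, limit = [], min(len(a), len(b)) // 2
        for i, ch in enumerate(a):
            left, right = max(0, i - limit), min(i + limit + 1, len(b))
            if ch in b[left:right]:
                matched.append(ch)
                j = b.index(ch)
                b = b[:j] + " " + b[j + 1 :]  # consume the matched character
        return "".join(matched)

    ma, mb = matched_chars(stra, strb), matched_chars(strb, stra)
    match_count = len(ma)
    if not match_count:
        return 0.0
    transpositions = len([1 for ca, cb in zip(ma, mb) if ca != cb]) // 2
    jaro = (match_count / len(stra) + match_count / len(strb)
            + (match_count - transpositions) / match_count) / 3
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca != cb:
            break
        prefix_len += 1
    return jaro + 0.1 * prefix_len * (1 - jaro)

print(jaro_winkler("hello", "world"))  # ~0.4667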
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
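# Minimal sketch of the scoring used inside the few-shot NER head above: a
# softmax over temperature-scaled cosine similarity between query token
# embeddings and support token embeddings. Shapes are illustrative
# assumptions, not taken from the model; recent PyTorch broadcasts
# cosine_similarity across the added singleton dims.
import torch

q = torch.randn(2, 7, 16)   # (batch, query_len, hidden)
s = torch.randn(2, 5, 16)   # (batch, support_len, hidden)
cos = torch.nn.CosineSimilarity(3, 1e-08)
p = torch.softmax(1.0 * cos(q.unsqueeze(2), s.unsqueeze(1)), dim=1)
print(p.shape)  # torch.Size([2, 7, 5])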
| 666 | 1 |
'''simple docstring'''
__lowerCAmelCase = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowerCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowerCAmelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
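# --- Illustration (not part of the original file) ---
# A small hedged sketch of what the alignment above achieves: the ClassLabel
# placeholder in the label schema is swapped for the dataset's concrete
# ClassLabel feature. Only public `datasets` feature types are assumed.
import copy
from datasets import ClassLabel, Features, Image

dataset_features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
label_schema = {"labels": ClassLabel}  # placeholder: the class itself
aligned = copy.deepcopy(label_schema)
aligned["labels"] = dataset_features["labels"]  # now a concrete ClassLabel
print(aligned["labels"].names)  # ['cat', 'dog']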
| 666 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__lowerCAmelCase = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "rag"
snake_case__ = True
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Tuple=" / " , __SCREAMING_SNAKE_CASE : Any=" // " , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Dict=300 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : int="wiki_dpr" , __SCREAMING_SNAKE_CASE : Optional[int]="train" , __SCREAMING_SNAKE_CASE : Union[str, Any]="compressed" , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> List[Any]:
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
a_ : Any = kwargs.pop('''question_encoder''' )
a_ : Dict = question_encoder_config.pop('''model_type''' )
a_ : Optional[int] = kwargs.pop('''generator''' )
a_ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
a_ : List[str] = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Tuple = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : List[str] = reduce_loss
a_ : Any = label_smoothing
a_ : Any = exclude_bos_score
a_ : Union[str, Any] = do_marginalize
a_ : Dict = title_sep
a_ : Union[str, Any] = doc_sep
a_ : Optional[Any] = n_docs
a_ : Tuple = max_combined_length
a_ : Dict = dataset
a_ : List[str] = dataset_split
a_ : int = index_name
a_ : int = retrieval_vector_size
a_ : int = retrieval_batch_size
a_ : int = passages_path
a_ : Optional[int] = index_path
a_ : List[str] = use_dummy_dataset
a_ : Optional[int] = output_retrieved
a_ : Any = do_deduplication
a_ : str = use_cache
if self.forced_eos_token_id is None:
a_ : Optional[Any] = getattr(self.generator , '''forced_eos_token_id''' , __SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : List[Any] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
a_ : List[str] = copy.deepcopy(self.__dict__ )
a_ : List[str] = self.question_encoder.to_dict()
a_ : str = self.generator.to_dict()
a_ : Dict = self.__class__.model_type
return output
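# --- Illustration (not part of the original file) ---
# A hedged usage sketch of the classmethod defined above, via the library's
# RagConfig counterpart: combine a question-encoder config with a generator
# config (assumes the public `transformers` DPRConfig/BartConfig/RagConfig).
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
print(rag_config.question_encoder.model_type, rag_config.generator.model_type)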
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : tuple[int, int] , __A : int ):
a_ , a_ : List[str] = position
a_ : Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a_ : Any = []
for position in positions:
a_ , a_ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__A )
return permissible_positions
def _UpperCAmelCase ( __A : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCAmelCase ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ):
if is_complete(__A ):
return True
for position in get_valid_pos(__A , len(__A ) ):
a_ , a_ : Dict = position
if board[y][x] == 0:
a_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__A , __A , curr + 1 ):
return True
a_ : Tuple = 0
return False
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = [[0 for i in range(__A )] for j in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Optional[Any] = 1
if open_knight_tour_helper(__A , (i, j) , 1 ):
return board
a_ : Union[str, Any] = 0
    a_ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
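# --- Illustration (not part of the original file) ---
# The same open-knight's-tour backtracking with readable names, as a compact,
# self-contained cross-check of the logic above.
def open_tour(n):
    moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    board = [[0] * n for _ in range(n)]

    def solve(y, x, step):
        board[y][x] = step
        if step == n * n:  # every square visited exactly once
            return True
        for dy, dx in moves:
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n and board[ny][nx] == 0:
                if solve(ny, nx, step + 1):
                    return True
        board[y][x] = 0  # backtrack
        return False

    for i in range(n):
        for j in range(n):
            if solve(i, j, 1):
                return board
    return None

print(open_tour(5))  # a 5x5 open tour exists; prints the visit order per square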
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : int , __A : int ):
if b == 0:
return (1, 0)
((a_) , (a_)) : Union[str, Any] = extended_euclid(__A , a % b )
a_ : Any = a // b
return (y, x - k * y)
def _UpperCAmelCase ( __A : int , __A : int , __A : int , __A : int ):
((a_) , (a_)) : Tuple = extended_euclid(__A , __A )
a_ : str = na * na
a_ : Optional[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def _UpperCAmelCase ( __A : int , __A : int ):
((a_) , (a_)) : Union[str, Any] = extended_euclid(__A , __A )
if b < 0:
a_ : Union[str, Any] = (b % n + n) % n
return b
def _UpperCAmelCase ( __A : int , __A : int , __A : int , __A : int ):
a_ , a_ : Tuple = invert_modulo(__A , __A ), invert_modulo(__A , __A )
a_ : Dict = na * na
a_ : Dict = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
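# --- Illustration (not part of the original file) ---
# A compact cross-check of the Chinese remainder theorem using Python's
# built-in modular inverse pow(a, -1, m) (Python >= 3.8) instead of the
# extended-Euclid helpers above.
def crt(ra, na, rb, nb):
    # Solve x = ra (mod na) and x = rb (mod nb) for coprime na, nb.
    m = na * nb
    x = ra * nb * pow(nb, -1, na) + rb * na * pow(na, -1, nb)
    return x % m

assert crt(2, 3, 3, 5) == 8  # 8 % 3 == 2 and 8 % 5 == 3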
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
snake_case__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The input training data file (a text file)."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
if self.train_file is not None:
a_ : Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
a_ : List[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = True
snake_case__ = None
snake_case__ = None
def __call__( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> Tuple:
a_ : str = '''label''' if '''label''' in features[0].keys() else '''labels'''
a_ : str = [feature.pop(__SCREAMING_SNAKE_CASE ) for feature in features]
a_ : List[Any] = len(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = len(features[0]['''input_ids'''] )
a_ : Optional[int] = [
[{k: v[i] for k, v in feature.items()} for i in range(__SCREAMING_SNAKE_CASE )] for feature in features
]
a_ : Union[str, Any] = list(chain(*__SCREAMING_SNAKE_CASE ) )
a_ : Tuple = self.tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
a_ : Union[str, Any] = {k: v.view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , -1 ) for k, v in batch.items()}
# Add back labels
        a_ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.int64 )
return batch
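# --- Illustration (not part of the original file) ---
# A dependency-light sketch (torch only, toy data) of the flatten -> pad ->
# un-flatten pattern the collator above relies on: per-example candidate
# sequences are flattened into one batch, padded, then reshaped back to
# (batch_size, num_choices, seq_len).
import torch
from itertools import chain

toy_features = [  # two examples, two candidate endings each (hypothetical ids)
    {"input_ids": [[1, 2, 3], [1, 2]]},
    {"input_ids": [[4, 5], [4, 5, 6]]},
]
num_choices = 2
flat = list(chain(*[f["input_ids"] for f in toy_features]))  # 4 sequences
max_len = max(len(seq) for seq in flat)
padded = [seq + [0] * (max_len - len(seq)) for seq in flat]  # pad id 0 assumed
toy_batch = torch.tensor(padded).view(len(toy_features), num_choices, -1)
print(toy_batch.shape)  # torch.Size([2, 2, 3])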
def _UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __A , __A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ : Any = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
a_ : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a_ : Any = {}
if data_args.train_file is not None:
a_ : List[str] = data_args.train_file
if data_args.validation_file is not None:
a_ : Tuple = data_args.validation_file
a_ : int = data_args.train_file.split('''.''' )[-1]
a_ : Any = load_dataset(
__A , data_files=__A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
a_ : Any = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a_ : Optional[int] = [f'ending{i}' for i in range(4 )]
a_ : Union[str, Any] = '''sent1'''
a_ : Union[str, Any] = '''sent2'''
if data_args.max_seq_length is None:
a_ : Dict = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
a_ : Optional[int] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
a_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__A : Any ):
a_ : Tuple = [[context] * 4 for context in examples[context_name]]
a_ : Optional[Any] = examples[question_header_name]
a_ : Any = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(__A )
]
# Flatten out
a_ : Optional[Any] = list(chain(*__A ) )
a_ : int = list(chain(*__A ) )
# Tokenize
a_ : List[Any] = tokenizer(
__A , __A , truncation=__A , max_length=__A , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__A ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
a_ : Optional[Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
a_ : Tuple = min(len(__A ) , data_args.max_train_samples )
a_ : Tuple = train_dataset.select(range(__A ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
a_ : Any = train_dataset.map(
__A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
a_ : Optional[Any] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
a_ : List[str] = min(len(__A ) , data_args.max_eval_samples )
a_ : Union[str, Any] = eval_dataset.select(range(__A ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
a_ : int = eval_dataset.map(
__A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
a_ : int = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__A , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__A : Any ):
a_ , a_ : Optional[Any] = eval_predictions
a_ : Tuple = np.argmax(__A , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
a_ : Tuple = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__A , data_collator=__A , compute_metrics=__A , )
# Training
if training_args.do_train:
a_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
a_ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ : Optional[int] = last_checkpoint
a_ : List[Any] = trainer.train(resume_from_checkpoint=__A )
trainer.save_model() # Saves the tokenizer too for easy upload
a_ : Optional[int] = train_result.metrics
a_ : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
a_ : Tuple = min(__A , len(__A ) )
trainer.log_metrics('''train''' , __A )
trainer.save_metrics('''train''' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
a_ : Any = trainer.evaluate()
a_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
a_ : Union[str, Any] = min(__A , len(__A ) )
trainer.log_metrics('''eval''' , __A )
trainer.save_metrics('''eval''' , __A )
a_ : Tuple = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def _UpperCAmelCase ( __A : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
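# --- Illustration (not part of the original file) ---
# An offline sketch of the same selectors on a hard-coded snippet shaped like
# a Scholar result (structure assumed), so the parsing logic can be exercised
# without any HTTP request.
sample_html = (
    '<div class="gs_ri"><div class="gs_fl">'
    "<a>Save</a><a>Cite</a><a>Cited by 42</a>"
    "</div></div>"
)
sample_soup = BeautifulSoup(sample_html, "html.parser")
sample_div = sample_soup.find("div", attrs={"class": "gs_ri"})
sample_anchors = sample_div.find("div", attrs={"class": "gs_fl"}).find_all("a")
print(sample_anchors[2].get_text())  # Cited by 42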
| 666 | 1 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _UpperCAmelCase ( __A : List[str] ):
a_ : Optional[int] = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _UpperCAmelCase ( __A : str ):
a_ , a_ : Dict = emb.weight.shape
a_ : Optional[Any] = nn.Linear(__A , __A , bias=__A )
a_ : Dict = emb.weight.data
return lin_layer
def _UpperCAmelCase ( __A : Any ):
a_ : List[str] = torch.load(__A , map_location='''cpu''' )
a_ : Optional[int] = Namespace(**checkpoint['''cfg''']['''model'''] )
a_ : Tuple = checkpoint['''model''']
remove_ignore_keys_(__A )
a_ : str = state_dict['''decoder.embed_tokens.weight'''].shape[0]
a_ : str = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
a_ : List[str] = XGLMConfig(
vocab_size=__A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
a_ : Tuple = XGLMForCausalLM(__A )
a_ : Any = model.load_state_dict(__A , strict=__A )
print(__A )
a_ : List[str] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
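# --- Illustration (not part of the original file) ---
# A minimal sketch of the weight-tying trick performed above: the output
# projection (lm head) reuses the embedding matrix, so logits get one column
# per vocabulary entry without extra parameters (toy sizes, torch only).
import torch
from torch import nn

emb = nn.Embedding(10, 4)  # vocab_size=10, d_model=4
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # share the same (10, 4) parameter
hidden = torch.randn(1, 4)
print(lm_head(hidden).shape)  # torch.Size([1, 10])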
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
            assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
            assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 1 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__lowerCAmelCase = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _UpperCAmelCase ( __A : Optional[Any] ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
__lowerCAmelCase = parser.parse_args()
if args.check_lib:
__lowerCAmelCase = importlib.import_module('transformers')
__lowerCAmelCase = Path(transformers_module.__file__).parent
else:
__lowerCAmelCase = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
        a_ : Tuple = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
        a_ : Any = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        a_ : List[Any] = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
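# --- Illustration (not part of the original file) ---
# The determinism pattern these tests rely on, in isolation: identically
# seeded torch.Generator objects yield identical random draws.
import torch

g1 = torch.Generator(device="cpu").manual_seed(42)
g2 = torch.Generator(device="cpu").manual_seed(42)
a = torch.randn(4, generator=g1)
b = torch.randn(4, generator=g2)
assert torch.equal(a, b)  # reproducible sampling across runs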
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
a_ : List[str] = hex_num[0] == '''-'''
if is_negative:
a_ : Dict = hex_num[1:]
try:
a_ : str = int(__A , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
a_ : Dict = ''''''
while int_num > 0:
a_ : List[str] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
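# --- Illustration (not part of the original file) ---
# A short cross-check of the conversion above (clean names, same unusual
# contract: the result is an int whose decimal digits spell the binary form).
def hex_to_bin_check(hex_num):
    value = int(hex_num.strip(), 16)
    return int(bin(abs(value))[2:]) * (-1 if value < 0 else 1)

assert hex_to_bin_check("AC") == 10101100
assert hex_to_bin_check("-ac") == -10101100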
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _UpperCAmelCase ( __A : str , __A : int , __A : Tuple ):
a_ : str = dct.pop(__A )
a_ : Dict = val
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
            a_ : List[Any] = in_proj_bias[:dim]
            a_ : Optional[Any] = in_proj_weight[dim : dim * 2, :]
            a_ : Union[str, Any] = in_proj_bias[dim : dim * 2]
            a_ : Optional[int] = in_proj_weight[-dim:, :]
            a_ : int = in_proj_bias[-dim:]
# fmt: on
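# Illustrative sketch of the split performed above (hypothetical dim=96): the fused
# Swin qkv projection has shape (3*dim, dim); rows [0:dim], [dim:2*dim] and
# [2*dim:3*dim] become the separate query, key and value weights, e.g.
#     in_proj_weight.shape == (288, 96)
#     query, key, value = in_proj_weight[:96, :], in_proj_weight[96:192, :], in_proj_weight[192:, :]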
def _UpperCAmelCase ( __A : Dict , __A : Dict ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
        a_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw )
return im
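# The helper above fetches the standard COCO val2017 test image (two cats on a
# couch) that Hugging Face conversion scripts commonly use as a quick sanity check.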
@torch.no_grad()
def _UpperCAmelCase ( __A : int , __A : int , __A : Any ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
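# Example invocation (the script filename and output path are hypothetical; only
# the two model names listed in `choices` above are accepted):
#     python convert_deta_swin_to_hf.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large --push_to_hub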
| 666 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowerCAmelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : Dict = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
a_ : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
a_ : str = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int=None ) -> Optional[int]:
a_ : str = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
a_ : Tuple = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        a_ : List[str] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
a_ : List[Any] = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : List[str] = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
a_ : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , f'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''DDPM''' , '''Test''' , __SCREAMING_SNAKE_CASE ) , )
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
        a_ : int = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
        a_ : Optional[Any] = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
        a_ : str = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
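# Minimal usage sketch mirroring the slow test above (assumes Hub access to the
# same public checkpoint; not part of the test suite):
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]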
| 666 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
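        # Sketch of the toy byte-level BPE defined in setUp: of the merges written
        # there, only "e r" applies to these words, so
        #     "lower"  -> ["l", "o", "w", "er"]
        #     " newer" -> ["\u0120", "n", "e", "w", "er"]
        # which is exactly the token sequence checked above.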
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 666 | 1 |
'''simple docstring'''
__lowerCAmelCase = {str(digit): digit**5 for digit in range(10)}
def _UpperCAmelCase ( __A : int ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__A ) )
def _UpperCAmelCase ( ):
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(__A ) )
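# With the bounds above (1_000 to 1_000_000) the matching numbers are
# 4150, 4151, 54748, 92727, 93084 and 194979, so solution() returns 443839
# (the well-known Project Euler 30 result for fifth powers).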
if __name__ == "__main__":
print(solution())
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
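# Shape sketch for the retriever call exercised above: given question ids of
# shape (batch, seq) and question hidden states of shape
# (batch, retrieval_vector_size), `retriever(...)` returns a dict containing
# `context_input_ids`, `context_attention_mask`, `retrieved_doc_embeds` with
# shape (batch, n_docs, retrieval_vector_size) and `doc_ids`.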
| 666 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __get__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=None ) -> Dict:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
a_ : str = '''__cached_''' + self.fget.__name__
a_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if cached is None:
a_ : List[Any] = self.fget(__SCREAMING_SNAKE_CASE )
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return cached
def _UpperCAmelCase ( __A : Any ):
a_ : Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
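# Example behaviour of the truth-value parser above (mirrors distutils.util.strtobool):
#     "y", "YES", "t", "on", "1"  -> 1
#     "n", "no", "f", "off", "0"  -> 0
#     anything else               -> ValueError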
def _UpperCAmelCase ( __A : List[Any] ):
if is_torch_fx_proxy(__A ):
return True
if is_torch_available():
import torch
if isinstance(__A , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__A , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__A , (jnp.ndarray, Tracer) ):
return True
return isinstance(__A , np.ndarray )
def _UpperCAmelCase ( __A : Any ):
return isinstance(__A , np.ndarray )
def _UpperCAmelCase ( __A : Tuple ):
return _is_numpy(__A )
def _UpperCAmelCase ( __A : Tuple ):
import torch
return isinstance(__A , torch.Tensor )
def _UpperCAmelCase ( __A : Tuple ):
return False if not is_torch_available() else _is_torch(__A )
def _UpperCAmelCase ( __A : List[Any] ):
import torch
return isinstance(__A , torch.device )
def _UpperCAmelCase ( __A : Optional[int] ):
return False if not is_torch_available() else _is_torch_device(__A )
def _UpperCAmelCase ( __A : int ):
import torch
if isinstance(__A , __A ):
if hasattr(__A , __A ):
a_ : Any = getattr(__A , __A )
else:
return False
return isinstance(__A , torch.dtype )
def _UpperCAmelCase ( __A : str ):
return False if not is_torch_available() else _is_torch_dtype(__A )
def _UpperCAmelCase ( __A : List[Any] ):
import tensorflow as tf
return isinstance(__A , tf.Tensor )
def _UpperCAmelCase ( __A : Optional[Any] ):
return False if not is_tf_available() else _is_tensorflow(__A )
def _UpperCAmelCase ( __A : int ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__A , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(__A )
return type(__A ) == tf.Tensor
def _UpperCAmelCase ( __A : Dict ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(__A )
def _UpperCAmelCase ( __A : Optional[Any] ):
import jax.numpy as jnp # noqa: F811
return isinstance(__A , jnp.ndarray )
def _UpperCAmelCase ( __A : Optional[int] ):
return False if not is_flax_available() else _is_jax(__A )
def _UpperCAmelCase ( __A : List[str] ):
if isinstance(__A , (dict, UserDict) ):
return {k: to_py_obj(__A ) for k, v in obj.items()}
elif isinstance(__A , (list, tuple) ):
return [to_py_obj(__A ) for o in obj]
elif is_tf_tensor(__A ):
return obj.numpy().tolist()
elif is_torch_tensor(__A ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__A ):
return np.asarray(__A ).tolist()
elif isinstance(__A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
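# Example for the converter above (assumes torch is installed): nested containers
# of framework tensors become plain Python objects, e.g.
#     {"ids": torch.tensor([[1, 2]])}  ->  {"ids": [[1, 2]]}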
def _UpperCAmelCase ( __A : Dict ):
if isinstance(__A , (dict, UserDict) ):
return {k: to_numpy(__A ) for k, v in obj.items()}
elif isinstance(__A , (list, tuple) ):
return np.array(__A )
elif is_tf_tensor(__A ):
return obj.numpy()
elif is_torch_tensor(__A ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__A ):
return np.asarray(__A )
else:
return obj
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ : Dict = fields(self )
# Safety and consistency checks
if not len(__SCREAMING_SNAKE_CASE ):
raise ValueError(f'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
a_ : List[str] = getattr(self , class_fields[0].name )
a_ : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : Any = first_field.items()
a_ : int = True
else:
try:
a_ : str = iter(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = True
except TypeError:
a_ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__SCREAMING_SNAKE_CASE ):
if (
not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) )
or not len(__SCREAMING_SNAKE_CASE ) == 2
or not isinstance(element[0] , __SCREAMING_SNAKE_CASE )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
a_ : List[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
a_ : Optional[int] = element[1]
elif first_field is not None:
a_ : Any = first_field
else:
for field in class_fields:
a_ : Optional[int] = getattr(self , field.name )
if v is not None:
a_ : Any = v
def __delitem__( self : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def SCREAMING_SNAKE_CASE ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def SCREAMING_SNAKE_CASE ( self : List[str] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def SCREAMING_SNAKE_CASE ( self : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : Optional[int] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __setitem__( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any ) -> str:
# Will raise a KeyException if needed
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
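# Usage sketch for the output container above (hypothetical subclass with a
# `logits` field): fields left as None are skipped, and the remaining fields can
# be read by attribute, by key, or by position:
#     out = SomeModelOutput(logits=logits)
#     out.logits is out["logits"] is out[0]   # all the same object
#     out.to_tuple() == (logits,)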
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ) -> Any:
raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}' )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "longest"
snake_case__ = "max_length"
snake_case__ = "do_not_pad"
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "pt"
snake_case__ = "tf"
snake_case__ = "np"
snake_case__ = "jax"
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[ContextManager] ) -> Any:
a_ : List[Any] = context_managers
a_ : str = ExitStack()
def __enter__( self : str ) -> int:
for context_manager in self.context_managers:
self.stack.enter_context(__SCREAMING_SNAKE_CASE )
def __exit__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Tuple ) -> Any:
self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( __A : Optional[int] ):
a_ : int = infer_framework(__A )
if framework == "tf":
a_ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
a_ : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
a_ : str = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _UpperCAmelCase ( __A : Optional[int] ):
a_ : List[str] = model_class.__name__
a_ : Optional[int] = infer_framework(__A )
if framework == "tf":
a_ : List[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
a_ : int = inspect.signature(model_class.forward ) # PyTorch models
else:
a_ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _UpperCAmelCase ( __A : MutableMapping , __A : str = "" , __A : str = "." ):
def _flatten_dict(__A : List[str] , __A : List[Any]="" , __A : List[str]="." ):
for k, v in d.items():
a_ : Optional[Any] = str(__A ) + delimiter + str(__A ) if parent_key else k
if v and isinstance(__A , __A ):
yield from flatten_dict(__A , __A , delimiter=__A ).items()
else:
yield key, v
return dict(_flatten_dict(__A , __A , __A ) )
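# Quick illustrative check of the flattening behaviour implemented above,
# written as a self-contained helper (the name `_demo_flatten` is made up):
def _demo_flatten(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if v and isinstance(v, dict):
            out.update(_demo_flatten(v, key, delimiter))
        else:
            out[key] = v
    return out
assert _demo_flatten({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) == {"a": 1, "b.c": 2, "b.d.e": 3}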
@contextmanager
def _UpperCAmelCase ( __A : Dict , __A : bool = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Tuple=None ):
if is_numpy_array(__A ):
return np.transpose(__A , axes=__A )
elif is_torch_tensor(__A ):
return array.T if axes is None else array.permute(*__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.transpose(__A , perm=__A )
elif is_jax_tensor(__A ):
return jnp.transpose(__A , axes=__A )
else:
raise ValueError(f'Type not supported for transpose: {type(__A )}.' )
def _UpperCAmelCase ( __A : List[Any] , __A : Optional[Any] ):
if is_numpy_array(__A ):
return np.reshape(__A , __A )
elif is_torch_tensor(__A ):
return array.reshape(*__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.reshape(__A , __A )
elif is_jax_tensor(__A ):
return jnp.reshape(__A , __A )
else:
raise ValueError(f'Type not supported for reshape: {type(__A )}.' )
def _UpperCAmelCase ( __A : Optional[Any] , __A : Union[str, Any]=None ):
if is_numpy_array(__A ):
return np.squeeze(__A , axis=__A )
elif is_torch_tensor(__A ):
return array.squeeze() if axis is None else array.squeeze(dim=__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.squeeze(__A , axis=__A )
elif is_jax_tensor(__A ):
return jnp.squeeze(__A , axis=__A )
else:
raise ValueError(f'Type not supported for squeeze: {type(__A )}.' )
def _UpperCAmelCase ( __A : List[Any] , __A : List[str] ):
if is_numpy_array(__A ):
return np.expand_dims(__A , __A )
elif is_torch_tensor(__A ):
return array.unsqueeze(dim=__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.expand_dims(__A , axis=__A )
elif is_jax_tensor(__A ):
return jnp.expand_dims(__A , axis=__A )
else:
raise ValueError(f'Type not supported for expand_dims: {type(__A )}.' )
def _UpperCAmelCase ( __A : Tuple ):
if is_numpy_array(__A ):
return np.size(__A )
elif is_torch_tensor(__A ):
return array.numel()
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.size(__A )
elif is_jax_tensor(__A ):
return array.size
else:
        raise ValueError(f'Type not supported for tensor_size: {type(__A )}.' )
def _UpperCAmelCase ( __A : Tuple , __A : str ):
for key, value in auto_map.items():
if isinstance(__A , (tuple, list) ):
a_ : Union[str, Any] = [f'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
a_ : Any = f'{repo_id}--{value}'
return auto_map
def _UpperCAmelCase ( __A : Tuple ):
for base_class in inspect.getmro(__A ):
a_ : Union[str, Any] = base_class.__module__
a_ : List[Any] = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
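# Illustrative check of the signature inspection used by the label finder
# above: parameters whose names contain "label" are collected from the
# forward signature. `_ToyModel` is a stand-in defined only for this demo.
import inspect
class _ToyModel:
    def forward(self, input_ids, attention_mask=None, labels=None):
        return None
assert [p for p in inspect.signature(_ToyModel.forward).parameters if "label" in p] == ["labels"]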
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
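# The slant height above is the hypotenuse of the right triangle formed by
# the frustum height and the difference of the two radii:
#   l = sqrt(h**2 + (r1 - r2)**2)
# so the total surface is the lateral part pi * l * (r1 + r2) plus the two
# circular ends pi * r1**2 + pi * r2**2.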
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__A , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
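# Worked check of Heron's formula above with the 3-4-5 right triangle:
# s = (3 + 4 + 5) / 2 = 6 and sqrt(6 * 3 * 2 * 1) = 6.0, matching
# (base * height) / 2 = (3 * 4) / 2.
assert sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) == 6.0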
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or '''
            '''equal to three as number of sides''' )
elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as '''
            '''length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
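# Brute-force cross-check of the sliding-window search above: score every
# 13-digit window directly; windows containing a zero contribute 0 and so
# drop out automatically. The helper name is illustrative only.
from math import prod
def _brute_force(n: str, span: int = 13) -> int:
    return max(prod(int(d) for d in n[i : i + span]) for i in range(len(n) - span + 1))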
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
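# The seed handling in the dummy-input builder above follows a common
# pattern: MPS does not support device-bound generators, so the global CPU
# generator is used there. A standalone version of that branch, for
# reference only:
def _make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # CPU generator when on MPS
    return torch.Generator(device=device).manual_seed(seed)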
| 666 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Model type selected in the list: " + ", ".join(SCREAMING_SNAKE_CASE_ )} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
snake_case__ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
snake_case__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
snake_case__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
snake_case__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
snake_case__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
snake_case__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
snake_case__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "train"
snake_case__ = "dev"
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : SquadDataTrainingArguments , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Union[str, Split] = Split.train , __SCREAMING_SNAKE_CASE : Optional[bool] = False , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pt" , ) -> int:
a_ : List[str] = args
a_ : Optional[int] = is_language_sensitive
a_ : Union[str, Any] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
a_ : List[Any] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
a_ : Tuple = mode
# Load data features from cache or dataset file
a_ : List[str] = '''v2''' if args.version_2_with_negative else '''v1'''
a_ : Dict = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a_ : Tuple = cached_features_file + '''.lock'''
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
a_ : Tuple = time.time()
a_ : str = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
a_ : Optional[int] = self.old_features['''features''']
a_ : Tuple = self.old_features.get('''dataset''' , __SCREAMING_SNAKE_CASE )
a_ : Tuple = self.old_features.get('''examples''' , __SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
''' future run''' )
else:
if mode == Split.dev:
a_ : Dict = self.processor.get_dev_examples(args.data_dir )
else:
a_ : Tuple = self.processor.get_train_examples(args.data_dir )
a_ , a_ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__SCREAMING_SNAKE_CASE , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__SCREAMING_SNAKE_CASE , )
a_ : str = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , __SCREAMING_SNAKE_CASE , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : str ) -> str:
return len(self.features )
def __getitem__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
a_ : str = self.features[i]
a_ : List[str] = torch.tensor(feature.input_ids , dtype=torch.long )
a_ : Optional[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
a_ : Optional[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
a_ : Tuple = torch.tensor(feature.cls_index , dtype=torch.long )
a_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
a_ : List[str] = torch.tensor(feature.is_impossible , dtype=torch.float )
a_ : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
a_ : Optional[int] = torch.tensor(feature.start_position , dtype=torch.long )
a_ : List[str] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
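# Hypothetical usage sketch; `SquadDataset` is the upstream name of the
# class above, and the tokenizer checkpoint and data_dir are placeholders:
#
#   from transformers import AutoTokenizer
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_set = SquadDataset(args, tokenizer, mode=Split.train)
#   example = train_set[0]  # dict of input_ids / attention_mask / ...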
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
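# Sketch of the lazy-import pattern wired up above: attributes resolve to
# their submodules only on first access. This is a simplified stand-in for
# illustration, not the real _LazyModule implementation.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)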
| 666 | 1 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCAmelCase ( __A : int ):
a_ : Dict = prime_factors(__A )
if is_square_free(__A ):
return -1 if len(__A ) % 2 else 1
return 0
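# Spot checks for the Möbius function above (it is the only definition in
# this file, so the renamed identifier is unambiguous):
assert _UpperCAmelCase(2) == -1  # a single prime factor -> odd count
assert _UpperCAmelCase(6) == 1   # 6 = 2 * 3, two distinct primes -> even count
assert _UpperCAmelCase(4) == 0   # 4 = 2 * 2 is not square-free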
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
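# Partition arithmetic behind the assertion above: 100 int64 rows at 8 bytes
# each with max_shard_size=16 gives 2 rows per shard, i.e. 100 / 2 = 50
# partitions.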
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
snake_case__ = 1
@register_to_config
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=2000 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=20 , __SCREAMING_SNAKE_CASE : List[Any]=1e-3 ) -> Dict:
a_ : Any = None
a_ : str = None
a_ : Tuple = None
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, torch.device] = None ) -> Union[str, Any]:
a_ : Optional[Any] = torch.linspace(1 , self.config.sampling_eps , __SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[Any]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
a_ : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
a_ : List[str] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
a_ : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
a_ : Dict = std.unsqueeze(-1 )
a_ : Dict = -score / std
# compute
a_ : Any = -1.0 / len(self.timesteps )
a_ : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
a_ : int = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
a_ : Any = beta_t.unsqueeze(-1 )
a_ : Union[str, Any] = -0.5 * beta_t * x
a_ : str = torch.sqrt(__SCREAMING_SNAKE_CASE )
a_ : List[str] = drift - diffusion**2 * score
a_ : Dict = x + drift * dt
# add noise
a_ : Dict = randn_tensor(x.shape , layout=x.layout , generator=__SCREAMING_SNAKE_CASE , device=x.device , dtype=x.dtype )
a_ : Optional[Any] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Optional[Any] ) -> List[str]:
return self.config.num_train_timesteps
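# The step above is an Euler-Maruyama discretization of the reverse-time
# VP-SDE from the score-SDE formulation this scheduler follows. With beta(t)
# linear between beta_min and beta_max, the model output is first converted
# to a score via score = -model_output / std (std is the marginal standard
# deviation), and then:
#
#   drift     = -1/2 * beta(t) * x - beta(t) * score
#   diffusion = sqrt(beta(t))
#   x_mean    = x + drift * dt
#   x         = x_mean + diffusion * sqrt(-dt) * z,   z ~ N(0, I)
#
# with dt = -1 / num_inference_steps (time runs from 1 down to sampling_eps).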
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
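# Shape convention used above for BLOOM's cached attention states, with
# B = batch, H = num_attention_heads and d = hidden_size // H:
#
#   key   : (B * H, d, past_seq_len)
#   value : (B * H, past_seq_len, d)
#
# Keys are stored pre-transposed relative to values, which is why
# fill_with_past_key_values_ is called with inverted_values_shape above and
# the dynamic sequence axis sits at position 2.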
| 666 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.txt'}
__lowerCAmelCase = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__lowerCAmelCase = {
'openbmb/cpm-ant-10b': 1_024,
}
def _UpperCAmelCase ( __A : Any ):
a_ : Union[str, Any] = collections.OrderedDict()
with open(__A , '''r''' , encoding='''utf-8''' ) as reader:
a_ : Optional[Any] = reader.readlines()
for index, token in enumerate(__A ):
a_ : Optional[Any] = token.rstrip('''\n''' )
a_ : str = index
return vocab
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]=200 ) -> Dict:
a_ : int = vocab
a_ : str = unk_token
a_ : int = max_input_chars_per_word
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Dict ) -> int:
a_ : List[str] = list(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word:
return [self.unk_token]
a_ : Any = 0
a_ : Any = []
while start < len(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = len(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = None
while start < end:
a_ : Dict = ''''''.join(chars[start:end] )
if substr in self.vocab:
a_ : List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = end
return sub_tokens
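# Toy run of the greedy longest-prefix matching implemented by the class
# above; the vocabulary and input are made up for this demonstration:
_toy_vocab = {"un": 0, "aff": 1, "able": 2, "unaff": 3}
_toy_wordpiece = SCREAMING_SNAKE_CASE(_toy_vocab, "<unk>")
assert _toy_wordpiece.SCREAMING_SNAKE_CASE("unaffable") == ["unaff", "able"]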
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = False
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict="<d>" , __SCREAMING_SNAKE_CASE : int="</d>" , __SCREAMING_SNAKE_CASE : Optional[int]="<s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="<pad>" , __SCREAMING_SNAKE_CASE : int="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="</n>" , __SCREAMING_SNAKE_CASE : str="</_>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="left" , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Union[str, Any]:
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=__SCREAMING_SNAKE_CASE , eod_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , line_token=__SCREAMING_SNAKE_CASE , space_token=__SCREAMING_SNAKE_CASE , padding_side=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
a_ : Union[str, Any] = bod_token
a_ : str = eod_token
a_ : int = load_vocab(__SCREAMING_SNAKE_CASE )
a_ : Any = self.encoder[space_token]
a_ : List[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a_ : Tuple = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
a_ : List[Any] = {v: k for k, v in self.encoder.items()}
a_ : Tuple = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
return self.encoder[self.bod_token]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
return self.encoder[self.eod_token]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
return self.encoder["\n"]
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
a_ : Tuple = []
for x in jieba.cut(__SCREAMING_SNAKE_CASE , cut_all=__SCREAMING_SNAKE_CASE ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) )
return output_tokens
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
a_ : Any = [i for i in token_ids if i >= 0]
a_ : Optional[Any] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
return token in self.encoder
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
return "".join(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> Dict:
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
a_ : List[Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
a_ : Dict = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
a_ : Optional[int] = 0
if " " in self.encoder:
a_ : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
a_ : str = self.encoder['''\n''']
del self.encoder["\n"]
a_ : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
a_ : int = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : List[int] = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
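# Quick sanity check of the midpoint test above: for [1, 3, 2] the middle
# three elements are (1, 3, 2) and 3 exceeds both neighbours, so it is
# returned without recursing. Note the recursive calls refer to `peak`,
# this function's original (pre-renaming) identifier.
assert _UpperCAmelCase([1, 3, 2]) == 3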
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _UpperCAmelCase ( __A : Optional[int] ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _UpperCAmelCase ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
a_ : Optional[int] = [1, 2, 3]
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=2 )
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _UpperCAmelCase ( __A : Optional[Any] ):
a_ : List[Any] = [1, 2]
a_ : List[Any] = {'''a''': 1, '''b''': 2}
a_ : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
a_ : Any = {'''a''': {'''1''': 1}, '''b''': 2}
a_ : Union[str, Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
a_ : str = [2, 3]
a_ : Optional[int] = {'''a''': 2, '''b''': 3}
a_ : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
a_ : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
a_ : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
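                # Summary of the cases above (the single-token word is '''hello''', length 5):
                # with trim_offsets=True the returned spans exclude the adjacent space, e.g.
                # the second word maps to (6, 11); with trim_offsets=False the space is kept
                # inside the span, e.g. (5, 11).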
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[int]:
a_ : Dict = parent
a_ : Union[str, Any] = 13
a_ : int = 7
a_ : List[Any] = True
a_ : Optional[Any] = True
a_ : Union[str, Any] = True
a_ : Union[str, Any] = 99
a_ : Tuple = 32
a_ : Dict = 2
a_ : Optional[Any] = 4
a_ : Union[str, Any] = 37
a_ : Dict = '''gelu'''
a_ : Union[str, Any] = 0.1
a_ : int = 0.1
a_ : Dict = 512
a_ : Optional[Any] = 16
a_ : List[Any] = 2
a_ : Optional[Any] = 0.02
a_ : Any = 3
a_ : Tuple = 4
a_ : Union[str, Any] = None
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : int = None
if self.use_input_mask:
a_ : str = random_attention_mask([self.batch_size, self.seq_length] )
a_ : Union[str, Any] = None
a_ : Union[str, Any] = None
a_ : Optional[Any] = None
if self.use_labels:
a_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : Optional[Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
a_ : str = TFEsmModel(config=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
a_ : int = [input_ids, input_mask]
a_ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , ) -> int:
a_ : Any = True
a_ : Union[str, Any] = TFEsmModel(config=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
a_ : Dict = model(__SCREAMING_SNAKE_CASE )
a_ : str = [input_ids, input_mask]
a_ : Any = model(__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE )
# Also check the case where encoder outputs are not passed
a_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
a_ : Optional[int] = TFEsmForMaskedLM(config=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
a_ : str = self.num_labels
a_ : int = TFEsmForTokenClassification(config=__SCREAMING_SNAKE_CASE )
a_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a_ : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Any = TFEsmModelTester(self )
a_ : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[Any] = TFEsmModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
a_ : Dict = model.get_bias()
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for k, v in name.items():
assert isinstance(__SCREAMING_SNAKE_CASE , tf.Variable )
else:
a_ : Union[str, Any] = model.get_output_embeddings()
assert x is None
a_ : List[Any] = model.get_bias()
assert name is None
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : str = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
a_ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
a_ : List[str] = model(__SCREAMING_SNAKE_CASE )[0]
a_ : Optional[Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
a_ : List[Any] = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ : List[str] = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
a_ : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a_ : Tuple = model(__SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
a_ : str = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
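        # the pair layout below is BART-style: ``<s> A </s></s> B </s>``, e.g. for
        # A=[10, 11] and B=[12] this returns [cls_id, 10, 11, sep_id, sep_id, 12, sep_id]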
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 666 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : List[Any] , __A : str=False , __A : List[str]=False ):
a_ : str = '''backbone.''' if is_semantic else ''''''
a_ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', '''beit.embeddings.cls_token'''),
(f'{prefix}patch_embed.proj.weight', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'{prefix}patch_embed.proj.bias', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'{prefix}pos_embed', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _UpperCAmelCase ( __A : int , __A : Dict , __A : Optional[int]=False , __A : Tuple=False ):
for i in range(config.num_hidden_layers ):
a_ : List[str] = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
a_ : Union[str, Any] = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
a_ : Any = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
a_ : Any = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
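        # the fused `qkv.weight` is a (3 * hidden_size, hidden_size) tensor stacked as
        # [query; key; value]; BEiT stores no bias for the key projection, which is why
        # only `q_bias` and `v_bias` are popped above and the weight is sliced in thirds below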
a_ : int = in_proj_weight[
: config.hidden_size, :
]
a_ : Union[str, Any] = q_bias
a_ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
a_ : Dict = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
a_ : str = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
a_ : Optional[Any] = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
a_ : Union[str, Any] = gamma_a
a_ : int = gamma_a
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Any , __A : List[Any] ):
a_ : Optional[Any] = dct.pop(__A )
a_ : List[str] = val
def _UpperCAmelCase ( ):
a_ : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : Tuple = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __A : Tuple , __A : Any , __A : List[Any]=False ):
a_ : Optional[Any] = False if '''rvlcdip''' in checkpoint_url else True
a_ : Tuple = BeitConfig(use_absolute_position_embeddings=__A , use_mask_token=__A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
a_ : Dict = 10_24
a_ : int = 40_96
a_ : Tuple = 24
a_ : str = 16
# labels
if "rvlcdip" in checkpoint_url:
a_ : Tuple = 16
a_ : List[Any] = '''huggingface/label-files'''
a_ : int = '''rvlcdip-id2label.json'''
a_ : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
a_ : int = {int(__A ): v for k, v in idalabel.items()}
a_ : Dict = idalabel
a_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
a_ : str = torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )['''model''']
a_ : Any = create_rename_keys(__A , has_lm_head=__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , has_lm_head=__A )
# load HuggingFace model
a_ : int = BeitForMaskedImageModeling(__A ) if has_lm_head else BeitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# Check outputs on an image
a_ : Tuple = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__A )
a_ : List[str] = prepare_img()
a_ : Union[str, Any] = image_processor(images=__A , return_tensors='''pt''' )
a_ : List[str] = encoding['''pixel_values''']
a_ : Optional[int] = model(__A )
a_ : str = outputs.logits
# verify logits
a_ : Union[str, Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__A ), "Shape of logits not as expected"
Path(__A ).mkdir(exist_ok=__A )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__A )
if push_to_hub:
if has_lm_head:
a_ : str = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
a_ : int = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__A , )
model.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__A , )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__lowerCAmelCase = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
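# Hand-worked example for the call below: jaro_winkler('hello', 'world') matches
# only "l" in each direction, so match_count = 1 and transpositions = 0;
# jaro = 1/3 * (1/5 + 1/5 + 1/1) ~= 0.4667 and, with no common prefix,
# the Jaro-Winkler score stays ~= 0.4667.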
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "tapas"
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Any=3_0522 , __SCREAMING_SNAKE_CASE : Tuple=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : int=3072 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=1024 , __SCREAMING_SNAKE_CASE : Dict=[3, 256, 256, 2, 256, 256, 10] , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Any=1e-12 , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : int=10.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Any=1.0 , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[str]=1.0 , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : List[Any]="ratio" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[int]=64 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> str:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
a_ : Union[str, Any] = vocab_size
a_ : Optional[Any] = hidden_size
a_ : str = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Any = hidden_act
a_ : Dict = intermediate_size
a_ : Optional[int] = hidden_dropout_prob
a_ : str = attention_probs_dropout_prob
a_ : Tuple = max_position_embeddings
a_ : int = type_vocab_sizes
a_ : Tuple = initializer_range
a_ : Any = layer_norm_eps
# Fine-tuning task hyperparameters
a_ : Any = positive_label_weight
a_ : int = num_aggregation_labels
a_ : Union[str, Any] = aggregation_loss_weight
a_ : List[str] = use_answer_as_supervision
a_ : Any = answer_loss_importance
a_ : int = use_normalized_answer_loss
a_ : Optional[int] = huber_loss_delta
a_ : List[str] = temperature
a_ : Union[str, Any] = aggregation_temperature
a_ : str = use_gumbel_for_cells
a_ : List[Any] = use_gumbel_for_aggregation
a_ : Optional[Any] = average_approximation_function
a_ : Dict = cell_selection_preference
a_ : str = answer_loss_cutoff
a_ : Union[str, Any] = max_num_rows
a_ : Optional[int] = max_num_columns
a_ : str = average_logits_per_cell
a_ : Tuple = select_one_column
a_ : List[str] = allow_empty_column_selection
a_ : List[str] = init_cell_selection_weights_to_zero
a_ : Union[str, Any] = reset_position_index_per_cell
a_ : Dict = disable_per_token_loss
# Aggregation hyperparameters
a_ : List[str] = aggregation_labels
a_ : Dict = no_aggregation_label_index
if isinstance(self.aggregation_labels , __SCREAMING_SNAKE_CASE ):
a_ : str = {int(__SCREAMING_SNAKE_CASE ): v for k, v in aggregation_labels.items()}
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
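        # `W_supports` packs the support examples of every query back to back along the
        # batch axis; `sizes` records how many supports belong to each query so the loop
        # below can slice the per-query support encodings out of S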
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
| 666 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
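# The kernel above evaluates the real-valued Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel coordinates rotated by `theta` degrees about the kernel center.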
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn the image into grayscale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 666 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
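# Sanity-check sketch: a 5x5 board is a small case where an open knight's tour is
# known to exist, so open_knight_tour(5) returns a 5x5 board whose entries 1..25
# trace the tour in visiting order.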
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
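# The loop above sums the Chudnovsky series
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k) / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# Each term contributes roughly 14 new correct digits, hence ceil(precision / 14)
# iterations; 426880 * sqrt(10005) equals 640320**1.5 / 12 and the factor
# 262537412640768000 equals 640320**3.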
if __name__ == "__main__":
__lowerCAmelCase = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            __A , output_loading_info=__A )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            __A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
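            # `in_proj_weight` is a (3 * embed_dim, embed_dim) tensor stacking the query,
            # key and value projections row-wise; the branches below pick out each third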
            assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
            assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
__lowerCAmelCase = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
__lowerCAmelCase = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = RoFormerTokenizer
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[str]="[UNK]" , __SCREAMING_SNAKE_CASE : List[str]="[SEP]" , __SCREAMING_SNAKE_CASE : Union[str, Any]="[PAD]" , __SCREAMING_SNAKE_CASE : List[str]="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[Any]="[MASK]" , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ) -> Dict:
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
a_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
a_ : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
a_ : Tuple = do_lower_case
a_ : int = strip_accents
a_ : List[str] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
a_ : Dict = do_lower_case
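    # NOTE: the custom Jieba pre-tokenizer wraps a Python callable and is not
    # picklable, so __getstate__ below swaps in a plain BertPreTokenizer and
    # __setstate__ rebuilds the Jieba pre-tokenizer from the restored vocab.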
def __getstate__( self : str ) -> Tuple:
a_ : List[Any] = self.__dict__.copy()
a_ : Tuple = BertPreTokenizer()
return state
def __setstate__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
a_ : int = d
a_ : Optional[Any] = self.__dict__['''_tokenizer'''].get_vocab()
a_ : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> Union[str, Any]:
a_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
a_ : List[str] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=False , **__SCREAMING_SNAKE_CASE : Any , ) -> str:
a_ : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            __A , output_loading_info=__A )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            __A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old attention stores query/key/value as one packed in_proj matrix; split it into thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
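# Example invocation (hypothetical script name and paths; the flags are defined by the
# argparse setup below):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet_old --pytorch_dump_folder_path ./prophetnet_new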
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 666 | 1 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=99 , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : Tuple=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=512 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=None , ) -> Optional[int]:
a_ : str = parent
a_ : List[str] = batch_size
a_ : Tuple = seq_length
a_ : Optional[Any] = is_training
a_ : List[Any] = use_token_type_ids
a_ : Dict = use_labels
a_ : Dict = vocab_size
a_ : Tuple = hidden_size
a_ : List[str] = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : Any = intermediate_size
a_ : Dict = hidden_act
a_ : Dict = hidden_dropout_prob
a_ : List[Any] = attention_probs_dropout_prob
a_ : List[Any] = max_position_embeddings
a_ : Any = type_vocab_size
a_ : Dict = type_sequence_label_size
a_ : Tuple = initializer_range
a_ : Dict = num_labels
a_ : int = num_choices
a_ : Tuple = scope
a_ : Optional[Any] = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : Union[str, Any] = None
a_ : int = None
a_ : List[Any] = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
a_ : int = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , *__SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
a_ : Union[str, Any] = OpenAIGPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : List[str] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , *__SCREAMING_SNAKE_CASE : str ) -> List[Any]:
a_ : Optional[int] = OpenAIGPTLMHeadModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : List[str] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , *__SCREAMING_SNAKE_CASE : str ) -> Tuple:
a_ : Union[str, Any] = OpenAIGPTDoubleHeadsModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Dict ) -> str:
a_ : Tuple = self.num_labels
a_ : Any = OpenAIGPTForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Tuple = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=False ) -> Tuple:
a_ : Optional[int] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE , )
a_ : Any = inputs_dict['''labels''']
a_ : int = inputs_dict['''labels''']
a_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE , )
a_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
a_ : Tuple = OpenAIGPTModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
a_ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(__SCREAMING_SNAKE_CASE )
a_ : List[str] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) # the president is
a_ : Optional[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a_ : Tuple = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() , __SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
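# The helpers below splice the renamed checkpoint into HF format: `rename_key` moves a
# single tensor, while the two `read_in_*_q_k_v` helpers split the packed qkv/in_proj
# matrices into separate query/key/value projection weights.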
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
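# Example invocation (hypothetical output path; flags come from the argparse setup below):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub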
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['ConditionalDetrFeatureExtractor']
__lowerCAmelCase = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
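    # `_LazyModule` defers the heavy torch/vision imports declared above until an attribute
    # is first accessed, keeping the initial import of this subpackage cheap.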
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
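# Minimal usage sketch for the variable-languages feature above (a hedged illustration,
# assuming the flatten method is wired up as `encode_example` in the datasets Features API):
#   feature = TranslationVariableLanguages(languages=["de", "en"])
#   feature.encode_example({"de": "Hallo", "en": ["Hello", "Hi"]})
#   # -> {"language": ("de", "en", "en"), "translation": ("Hallo", "Hello", "Hi")}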
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 666 | 1 |
'''simple docstring'''
from ....utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
class MMBTConfig:
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
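# Usage sketch (hedged; assumes this wrapper is the deprecated MMBT-style modal config):
# it copies every attribute from an existing text config and adds the modal projection
# size, e.g.
#   config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)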
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
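# A small standalone sketch of the max-inner-product ranking the assertions
# above rely on (vector size 8 matches the dummy retrieval_vector_size): the
# all-ones query scores highest against the larger-magnitude document, and
# the negated query reverses the ordering.
import numpy as np

doc_embeds = np.stack([np.ones(8), 2 * np.ones(8)])  # dummy docs "0" and "1"
queries = np.stack([np.ones(8), -np.ones(8)])        # two query hidden states
scores = queries @ doc_embeds.T                      # pairwise inner products
assert scores[0].argmax() == 1  # positive query retrieves doc "1"
assert scores[1].argmax() == 0  # negated query retrieves doc "0"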
| 666 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
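# Illustrative sketch (random tensors, not the trained model above) of the
# scoring step in the forward pass: query token embeddings are matched against
# support start-token embeddings by dot product, summed over support tokens,
# then normalised with a softmax over the query positions.
import torch

q = torch.randn(5, 16)        # 5 query tokens, hidden size 16
s_start = torch.randn(3, 16)  # 3 support start-token embeddings
p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)
assert p_start.shape == (5,)
assert torch.isclose(p_start.sum(), torch.tensor(1.0))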
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
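# Quick numeric cross-check (standalone) tying two of the formulas above
# together: for the 5-12-13 right triangle, Heron's formula must agree with
# (base * height) / 2, since the two legs act as base and height.
from math import isclose, sqrt

a, b, c = 5.0, 12.0, 13.0
s = (a + b + c) / 2
assert isclose(sqrt(s * (s - a) * (s - b) * (s - c)), (a * b) / 2)  # both 30.0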
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
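# A toy sketch (stdlib only, hypothetical names) of the lazy-import pattern
# this __init__ relies on: with PEP 562's module-level __getattr__, exported
# names resolve to their submodule only on first attribute access. The real
# _LazyModule additionally caches modules and integrates with TYPE_CHECKING.
import importlib

_demo_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    for module_name, exported in _demo_import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)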
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
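# Minimal sketch of the seeding pattern used in get_dummy_inputs above: MPS
# has historically lacked per-device torch.Generator support, so tests fall
# back to the global torch.manual_seed there, while other devices get an
# isolated, device-bound generator.
import torch

generator = torch.Generator(device="cpu").manual_seed(0)
sample = torch.rand(2, generator=generator)
assert sample.shape == (2,)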
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__lowerCAmelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__lowerCAmelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "whisper"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=5_1865 , __SCREAMING_SNAKE_CASE : List[str]=80 , __SCREAMING_SNAKE_CASE : Dict=6 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : Any=6 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Dict=1536 , __SCREAMING_SNAKE_CASE : Optional[int]=1536 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : Tuple=5_0257 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Any=256 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Optional[Any]=1500 , __SCREAMING_SNAKE_CASE : Any=448 , __SCREAMING_SNAKE_CASE : str=5_0256 , __SCREAMING_SNAKE_CASE : Optional[int]=5_0256 , __SCREAMING_SNAKE_CASE : Tuple=5_0256 , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=[220, 5_0256] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=0.05 , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : str=7 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]:
a_ : Optional[int] = vocab_size
a_ : Dict = num_mel_bins
a_ : Union[str, Any] = d_model
a_ : str = encoder_layers
a_ : Union[str, Any] = encoder_attention_heads
a_ : str = decoder_layers
a_ : Optional[Any] = decoder_attention_heads
a_ : List[Any] = decoder_ffn_dim
a_ : Optional[Any] = encoder_ffn_dim
a_ : int = dropout
a_ : str = attention_dropout
a_ : Tuple = activation_dropout
a_ : List[str] = activation_function
a_ : int = init_std
a_ : str = encoder_layerdrop
a_ : int = decoder_layerdrop
a_ : Union[str, Any] = use_cache
a_ : Any = encoder_layers
a_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
a_ : Optional[int] = max_source_positions
a_ : Optional[int] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a_ : Optional[Any] = classifier_proj_size
a_ : int = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ : str = apply_spec_augment
a_ : List[Any] = mask_time_prob
a_ : int = mask_time_length
a_ : Tuple = mask_time_min_masks
a_ : Any = mask_feature_prob
a_ : Optional[Any] = mask_feature_length
a_ : Tuple = mask_feature_min_masks
a_ : List[Any] = median_filter_width
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , suppress_tokens=__SCREAMING_SNAKE_CASE , begin_suppress_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
a_ : Tuple = {0: '''batch'''}
else:
a_ : Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , __SCREAMING_SNAKE_CASE : int = 2_2050 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : int = 220 , ) -> Mapping[str, Any]:
a_ : List[str] = OrderedDict()
a_ : int = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , time_duration=__SCREAMING_SNAKE_CASE , frequency=__SCREAMING_SNAKE_CASE , )
a_ : Optional[Any] = encoder_inputs['''input_features'''].shape[2]
a_ : int = encoder_sequence_length // 2 if self.use_past else seq_length
a_ : Optional[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = encoder_inputs.pop('''input_features''' )
a_ : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
a_ : Tuple = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-3
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
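# The shard-size arithmetic the first test above relies on, spelled out
# standalone: int64 rows are 8 bytes each, so a 16-byte max_shard_size holds
# 2 rows per partition, and 100 rows therefore need 50 partitions.
row_size_bytes = 8  # pyarrow int64
max_shard_size = 16
num_rows = 100
assert num_rows // (max_shard_size // row_size_bytes) == 50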
| 666 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def _UpperCAmelCase ( __A : List[Any] , __A : List[Any] ):
# ===== initialization =====
a_ : str = Mock()
a_ : List[str] = conn, Mock()
a_ : Dict = iter([1, None] )
a_ : List[str] = lambda __A : next(__A )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=__A )
# ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
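# A small stdlib-only illustration of the read side-effect pattern above: an
# iterator of [chunk, None] makes each call to the mocked read() return the
# next value, so a "while data:" send loop runs once and then terminates.
from unittest.mock import Mock

chunks = iter([b"data", None])
read = Mock(side_effect=lambda _=None: next(chunks))
assert read(1024) == b"data"
assert read(1024) is None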
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the inputs in the way they appear in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
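# Standalone shape sketch (illustrative sizes) of the past_key_values layout
# built in generate_dummy_inputs above: BLOOM fuses batch and heads into one
# axis, keys carry head_dim before the time axis, and values the reverse.
import torch

batch, heads, head_dim, past_len = 2, 4, 8, 5
past_key = torch.zeros(batch * heads, head_dim, past_len)
past_value = torch.zeros(batch * heads, past_len, head_dim)
assert past_key.shape == (8, 8, 5) and past_value.shape == (8, 5, 8)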
| 666 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCAmelCase = get_logger()
__lowerCAmelCase = None
class SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
super().__init__(features=__SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '
'''is not serializable with either `pickle` or `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
a_ : Union[str, Any] = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable with
# either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
a_ : Optional[int] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
a_ : Any = str(jax.devices()[0] )
a_ : Tuple = jnp_array_kwargs
@staticmethod
def SCREAMING_SNAKE_CASE ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
import jax
import jax.numpy as jnp
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(__SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0 )
return column
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a_ : List[Any] = {}
if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
a_ : str = {'''dtype''': jnp.intaa}
else:
a_ : Union[str, Any] = {'''dtype''': jnp.intaa}
elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a_ : Tuple = {'''dtype''': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a_ : Union[str, Any] = np.asarray(__SCREAMING_SNAKE_CASE )
# using global variable since `jaxlib.xla_extension.Device` is not serializable with
# either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
a_ : List[str] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''' ) and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array ):
a_ : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : dict ) -> int:
return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : pa.Table ) -> Mapping:
a_ : Dict = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE )
a_ : str = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : pa.Table ) -> "jax.Array":
a_ : Optional[Any] = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
a_ : Any = self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self._consolidate(__SCREAMING_SNAKE_CASE )
return column
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : pa.Table ) -> Mapping:
a_ : Optional[Any] = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE )
a_ : Dict = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE )
a_ : str = self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
for column_name in batch:
a_ : Optional[Any] = self._consolidate(batch[column_name] )
return batch
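# Minimal sketch (assumes jax is installed) of the dtype policy implemented
# above: without 64-bit mode enabled, JAX defaults to 32-bit precision, so
# integer numpy inputs are materialised as int32 arrays.
import numpy as np
import jax.numpy as jnp

arr = jnp.array(np.arange(3, dtype=np.int64), dtype=jnp.int32)
assert arr.dtype == jnp.int32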
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
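# A brute-force cross-check (kept separate from the optimised solution above):
# scan every 13-digit window and take the maximum digit product. For the
# 1000-digit constant this should agree with the sliding-window answer.
def _brute_force_largest_product(digits: str, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for d in digits[i : i + width]:
            product *= int(d)
        best = max(best, product)
    return best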
| 666 | 1 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
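
# Hedged standalone sketch of the denoising loop the tests above exercise,
# with a zero "model" standing in for a real UNet (assumes `torch` and
# `diffusers` are installed).
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for the dummy model
    sample = scheduler.step(noise_pred, t, sample).prev_sample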
| 666 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a list that strictly increases then strictly decreases.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
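    # Example run of the divide-and-conquer search above; peak() assumes the
    # input strictly rises and then strictly falls.
    print(peak([1, 3, 5, 7, 6, 4, 2]))  # 7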
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
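
# Hedged usage sketch (assumes a `transformers` release that ships RoCBert):
# build the config and a randomly initialized model from it.
from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig(num_hidden_layers=2)
model = RoCBertModel(config)
print(model.config.concat_input)  # True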
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
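
# Hedged illustration of the offsets behaviour pinned down by the tests above
# (assumes `transformers` is installed and the checkpoint is reachable).
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc["offset_mapping"])  # character spans; shaped by add_prefix_space / trim_offsets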
| 666 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
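
# Hedged round-trip sketch: Dataset.to_json() drives a writer like the one
# above, and load_dataset("json", ...) the reader (assumes `datasets` is
# installed).
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl")  # JSON Lines, orient="records" by default
print(load_dataset("json", data_files="out.jsonl", split="train")[0])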
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
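
# Hedged usage sketch for the SentencePiece tokenizer above (it mirrors
# transformers' BarthezTokenizer; assumes `transformers` and `sentencepiece`
# are installed and the checkpoint is reachable).
from transformers import BarthezTokenizer as HFBarthezTokenizer

tok = HFBarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tok("Bonjour le monde").input_ids  # wrapped in <s> ... </s>
print(tok.convert_ids_to_tokens(ids))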
| 666 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
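
# Hedged equivalent of what `transformers-cli download <model>` does via
# run() above: two from_pretrained calls that populate the local cache.
from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained("bert-base-uncased", force_download=False)
AutoTokenizer.from_pretrained("bert-base-uncased", force_download=False)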
| 666 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not reused
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
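    # Reference values for the implementation above (hand-checked against
    # the Jaro-Winkler definition).
    print(jaro_winkler("martha", "marhta"))  # 0.9611111111111111
    print(jaro_winkler("hello", "hello"))  # 1.0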
| 666 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Dict=400 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=1 / 255 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , ) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
a_ : Dict = parent
a_ : str = batch_size
a_ : Optional[Any] = num_channels
a_ : List[str] = min_resolution
a_ : Optional[Any] = max_resolution
a_ : int = do_resize
a_ : int = size
a_ : int = do_normalize
a_ : str = image_mean
a_ : Dict = image_std
a_ : Optional[Any] = do_rescale
a_ : List[str] = rescale_factor
a_ : str = do_pad
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> Dict:
if not batched:
a_ : int = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
a_ , a_ : str = image.size
else:
a_ , a_ : Any = image.shape[1], image.shape[2]
if w < h:
a_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
a_ : str = self.size['''shortest_edge''']
elif w > h:
a_ : List[str] = self.size['''shortest_edge''']
a_ : List[Any] = int(self.size['''shortest_edge'''] * w / h )
else:
a_ : Union[str, Any] = self.size['''shortest_edge''']
a_ : Union[str, Any] = self.size['''shortest_edge''']
else:
a_ : Any = []
for image in image_inputs:
a_ , a_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a_ : Union[str, Any] = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
a_ : Dict = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ : Union[str, Any] = ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
a_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
a_ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
# Initialize image_processing
a_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ : Optional[int] = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ : Any = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
# Initialize image_processing
a_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
a_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ : Union[str, Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
a_ , a_ : int = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
# Initialize image_processing
a_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
a_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ : Dict = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
a_ , a_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
# prepare image and target
a_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a_ : List[str] = json.loads(f.read() )
a_ : Optional[Any] = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
a_ : Dict = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
a_ : Tuple = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# verify pixel values
a_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
a_ : Any = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
a_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
a_ : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
a_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
a_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
a_ : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
# verify size
a_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
# prepare image, target and masks_path
a_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a_ : Optional[int] = json.loads(f.read() )
a_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
a_ : int = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a_ : str = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
a_ : List[str] = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# verify pixel values
a_ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
a_ : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
a_ : Union[str, Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
a_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
a_ : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
a_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
a_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
# verify masks
a_ : Tuple = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE )
# verify orig_size
a_ : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
# verify size
a_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
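
# Hedged preprocessing sketch with the image processor tested above (assumes
# `transformers`, `torch` and `Pillow` are installed; checkpoint as in the
# slow tests).
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected torch.Size([1, 3, 800, 1066])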
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end by
    attending over the support set's start/end token representations."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
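
# Hedged shape check of the attention used in Atten() above, on random
# tensors laid out as (batch, tokens, 1, hidden).
import torch

cos = torch.nn.CosineSimilarity(3, 1e-08)
softmax = torch.nn.Softmax(dim=1)
q = torch.randn(1, 4, 1, 16)
s = torch.randn(1, 4, 1, 16)
print(softmax(1 * cos(q, s)).shape)  # torch.Size([1, 4, 1])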
| 666 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = GPTSanJapaneseTokenizer
snake_case__ = False
snake_case__ = {"do_clean_text": False, "add_prefix_space": False}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
super().setUp()
# fmt: off
a_ : Union[str, Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
a_ : Any = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
a_ : Dict = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
a_ : Union[str, Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
a_ : Dict = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
a_ , a_ : List[str] = self.get_input_output_texts(__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return text, ids
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : List[str] = self.get_tokenizer()
# Testing tokenization
a_ : Tuple = '''こんにちは、世界。 こんばんは、㔺界。'''
a_ : Any = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
a_ : Dict = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
a_ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
a_ : Tuple = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
a_ : Optional[int] = tokens + [tokenizer.unk_token]
a_ : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
a_ : int = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
a_ : Tuple = self.get_tokenizer()
# Testing tokenization
a_ : str = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
a_ : str = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
a_ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
a_ : Optional[Any] = '''こんにちは、世界。'''
a_ : Union[str, Any] = '''こんばんは、㔺界。😀'''
a_ : Union[str, Any] = '''こんにちは、世界。こんばんは、世界。😀'''
a_ : Union[str, Any] = tokenizer.encode(prefix_text + input_text )
a_ : Dict = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
a_ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , prefix_text=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.decode(__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.decode(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
a_ : Any = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
a_ : Any = '''こんにちは、世界。'''
a_ : Tuple = '''こんばんは、㔺界。😀'''
a_ : Tuple = len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) - 2
a_ : List[str] = len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) - 2
a_ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
a_ : int = [1] * (len_prefix + len_text + 1) + [0]
a_ : Optional[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids
a_ : Union[str, Any] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
a_ : Tuple = tokenizer(__SCREAMING_SNAKE_CASE , prefix_text=__SCREAMING_SNAKE_CASE ).token_type_ids
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
a_ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
a_ : int = tokenizer.encode('''あンいワ''' )
a_ : List[Any] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
a_ : str = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(__SCREAMING_SNAKE_CASE ) , tokenizer.decode(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(tokenizer.decode(__SCREAMING_SNAKE_CASE ) , tokenizer.decode(__SCREAMING_SNAKE_CASE ) )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
a_ : Optional[int] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
a_ : Any = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.batch_encode_plus(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
# fmt: off
a_ : List[Any] = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
a_ : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.token_type_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.attention_mask , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.input_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.token_type_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.attention_mask , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
# tokenizer has no padding token
pass
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
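        # Validate that the requested label column exists and is a ClassLabel, then
        # return a copy of this template whose schema uses the dataset's concrete
        # ClassLabel feature (with its real class names) in place of the placeholder.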
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 666 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : Tuple = 0
a_ : Tuple = [0]
a_ : List[Any] = [0]
a_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 0 )
a_ : str = [60]
a_ : Optional[int] = [10]
a_ : Any = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 0 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : Optional[Any] = 3
a_ : str = [1, 2, 3]
a_ : Optional[int] = [3, 2, 1]
a_ : List[Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 5 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : Any = 50
a_ : Dict = [60, 100, 120]
a_ : int = [10, 20, 30]
a_ : Dict = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 220 )
if __name__ == "__main__":
unittest.main()
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : tuple[int, int] , __A : int ):
a_ , a_ : List[str] = position
a_ : Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a_ : Any = []
for position in positions:
a_ , a_ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__A )
return permissible_positions
def _UpperCAmelCase ( __A : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCAmelCase ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ):
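    # Depth-first backtracking: write move number `curr + 1` onto each reachable
    # empty square, recurse, and undo the move (reset the square to 0) when the
    # branch cannot be completed.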
if is_complete(__A ):
return True
for position in get_valid_pos(__A , len(__A ) ):
a_ , a_ : Dict = position
if board[y][x] == 0:
a_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__A , __A , curr + 1 ):
return True
a_ : Tuple = 0
return False
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = [[0 for i in range(__A )] for j in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Optional[Any] = 1
if open_knight_tour_helper(__A , (i, j) , 1 ):
return board
a_ : Union[str, Any] = 0
    a_ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
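    # Hypothetical usage sketch (not in the original module): `open_knight_tour`
    # is the board-builder defined above; this prints a completed open tour on a
    # 5x5 board, where each cell holds the move number at which it was visited.
    for row in open_knight_tour(5):
        print(row)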
| 666 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : int=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : int=1e-5 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=["stage1", "stage2", "stage3"] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 3] , ) -> Any:
a_ : Dict = parent
a_ : str = batch_size
a_ : Union[str, Any] = image_size
a_ : Optional[int] = patch_size
a_ : List[str] = num_channels
a_ : Union[str, Any] = embed_dim
a_ : Optional[int] = depths
a_ : Optional[int] = num_heads
a_ : List[Any] = window_size
a_ : List[str] = mlp_ratio
a_ : Tuple = qkv_bias
a_ : Tuple = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : Optional[int] = drop_path_rate
a_ : str = hidden_act
a_ : List[Any] = use_absolute_embeddings
a_ : int = patch_norm
a_ : List[Any] = layer_norm_eps
a_ : str = initializer_range
a_ : Dict = is_training
a_ : str = scope
a_ : int = use_labels
a_ : Optional[int] = type_sequence_label_size
a_ : Dict = encoder_stride
a_ : List[Any] = out_features
a_ : str = out_indices
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Optional[int] = None
if self.use_labels:
a_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int ) -> List[str]:
a_ : List[Any] = MaskFormerSwinModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
a_ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
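        # Each Swin stage downsamples by 2 in both spatial dims, so the final
        # sequence length is (image_size / patch_size)**2 / 4**(num_stages - 1)
        # and the final hidden size is embed_dim * 2**(num_stages - 1).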
a_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
a_ : Union[str, Any] = MaskFormerSwinBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Dict = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = ['''stem''']
a_ : str = MaskFormerSwinBackbone(config=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : Any = self.prepare_config_and_inputs()
a_ , a_ , a_ : int = config_and_inputs
a_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
snake_case__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : Dict = MaskFormerSwinModelTester(self )
a_ : int = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
return
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Swin does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
a_ , a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = model_class(__SCREAMING_SNAKE_CASE )
a_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[int] = [*signature.parameters.keys()]
a_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
a_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
a_ : Any = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
a_ : Union[str, Any] = outputs.hidden_states
a_ : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
a_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a_ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
a_ : int = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Optional[Any] = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : int = 3
a_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
a_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a_ : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
a_ : Union[str, Any] = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : str = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE : Optional[Any] ):
a_ : str = 0
return t
def check_equivalence(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any={} ):
with torch.no_grad():
a_ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(__SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(__SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(__SCREAMING_SNAKE_CASE )}. Dict has'
f' `nan`: {torch.isnan(__SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(__SCREAMING_SNAKE_CASE )}.'
) , )
recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
a_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Any = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
a_ : int = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} )
a_ : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
snake_case__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
snake_case__ = MaskFormerSwinConfig
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : List[Any] = MaskFormerSwinModelTester(self )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
a_ : int = backbone_class(__SCREAMING_SNAKE_CASE )
backbone.to(__SCREAMING_SNAKE_CASE )
backbone.eval()
a_ : List[Any] = backbone(**__SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
a_ : List[Any] = backbone(**__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
        self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
a_ , a_ , a_ : Optional[int] = hidden_state.shape
                self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
a_ : Union[str, Any] = backbone(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
snake_case__ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 5_0257 , __SCREAMING_SNAKE_CASE : int = 1024 , __SCREAMING_SNAKE_CASE : int = 768 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "gelu_new" , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 1e-5 , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , ) -> List[Any]:
super().__init__()
a_ : Optional[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
a_ : str = prefix_inner_dim
a_ : Optional[Any] = prefix_hidden_dim
a_ : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
a_ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , __SCREAMING_SNAKE_CASE ) if self.prefix_hidden_dim is not None else nn.Identity()
)
a_ : Union[str, Any] = GPTaConfig(
vocab_size=__SCREAMING_SNAKE_CASE , n_positions=__SCREAMING_SNAKE_CASE , n_embd=__SCREAMING_SNAKE_CASE , n_layer=__SCREAMING_SNAKE_CASE , n_head=__SCREAMING_SNAKE_CASE , n_inner=__SCREAMING_SNAKE_CASE , activation_function=__SCREAMING_SNAKE_CASE , resid_pdrop=__SCREAMING_SNAKE_CASE , embd_pdrop=__SCREAMING_SNAKE_CASE , attn_pdrop=__SCREAMING_SNAKE_CASE , layer_norm_epsilon=__SCREAMING_SNAKE_CASE , initializer_range=__SCREAMING_SNAKE_CASE , scale_attn_weights=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , scale_attn_by_inverse_layer_idx=__SCREAMING_SNAKE_CASE , reorder_and_upcast_attn=__SCREAMING_SNAKE_CASE , )
a_ : Tuple = GPTaLMHeadModel(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , ) -> List[Any]:
a_ : Any = self.transformer.transformer.wte(__SCREAMING_SNAKE_CASE )
a_ : Dict = self.encode_prefix(__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.decode_prefix(__SCREAMING_SNAKE_CASE )
a_ : List[str] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
a_ : List[str] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
a_ : int = torch.cat((dummy_token, input_ids) , dim=1 )
a_ : Dict = self.transformer(inputs_embeds=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : torch.device ) -> torch.Tensor:
return torch.zeros(__SCREAMING_SNAKE_CASE , self.prefix_length , dtype=torch.intaa , device=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Any:
return self.encode_prefix(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Dict:
a_ : Dict = torch.split(__SCREAMING_SNAKE_CASE , 1 , dim=0 )
a_ : str = []
a_ : Optional[int] = []
for feature in features:
a_ : str = self.decode_prefix(feature.to(__SCREAMING_SNAKE_CASE ) ) # back to the clip feature
# Only support beam search for now
a_ , a_ : List[Any] = self.generate_beam(
input_embeds=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
a_ : Dict = torch.stack(__SCREAMING_SNAKE_CASE )
a_ : List[str] = torch.stack(__SCREAMING_SNAKE_CASE )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 67 , __SCREAMING_SNAKE_CASE : float = 1.0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> Dict:
a_ : int = eos_token_id
a_ : Dict = None
a_ : Dict = None
a_ : List[Any] = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.int )
a_ : Any = torch.zeros(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.bool )
if input_embeds is not None:
a_ : Tuple = input_embeds
else:
a_ : Dict = self.transformer.transformer.wte(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = self.transformer(inputs_embeds=__SCREAMING_SNAKE_CASE )
a_ : Tuple = outputs.logits
a_ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
a_ : Optional[Any] = logits.softmax(-1 ).log()
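            # Work in log-probability space so beam scores can be accumulated by
            # simple addition across decoding steps.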
if scores is None:
a_ , a_ : Optional[Any] = logits.topk(__SCREAMING_SNAKE_CASE , -1 )
a_ : Dict = generated.expand(__SCREAMING_SNAKE_CASE , *generated.shape[1:] )
a_ , a_ : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
a_ : Optional[int] = next_tokens
else:
a_ : int = tokens.expand(__SCREAMING_SNAKE_CASE , *tokens.shape[1:] )
a_ : Tuple = torch.cat((tokens, next_tokens) , dim=1 )
else:
a_ : List[Any] = -float(np.inf )
a_ : Tuple = 0
a_ : List[Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
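                # Rank candidate beams by average log-probability per generated
                # token, so longer continuations are not unfairly penalized.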
a_ : str = scores_sum / seq_lengths[:, None]
a_ , a_ : List[str] = scores_sum_average.view(-1 ).topk(__SCREAMING_SNAKE_CASE , -1 )
a_ : str = next_tokens // scores_sum.shape[1]
a_ : int = seq_lengths[next_tokens_source]
a_ : Tuple = next_tokens % scores_sum.shape[1]
a_ : List[str] = next_tokens.unsqueeze(1 )
a_ : Tuple = tokens[next_tokens_source]
a_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
a_ : Tuple = generated[next_tokens_source]
a_ : int = scores_sum_average * seq_lengths
a_ : int = is_stopped[next_tokens_source]
a_ : Optional[int] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
a_ : Optional[Any] = torch.cat((generated, next_token_embed) , dim=1 )
a_ : List[str] = is_stopped + next_tokens.eq(__SCREAMING_SNAKE_CASE ).squeeze()
if is_stopped.all():
break
a_ : Optional[Any] = scores / seq_lengths
a_ : str = scores.argsort(descending=__SCREAMING_SNAKE_CASE )
# tokens tensors are already padded to max_seq_length
a_ : List[str] = [tokens[i] for i in order]
a_ : str = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 )
a_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 666 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
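                # The fused `in_proj_weight` stacks the Q/K/V projections row-wise:
                # rows [0, embed_dim) are the query weights, [embed_dim, 2*embed_dim)
                # the key weights, and [2*embed_dim, 3*embed_dim) the value weights.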
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
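    # Example invocation (script name and paths are placeholders):
    #   python convert_prophetnet_checkpoint.py \
    #       --prophetnet_checkpoint_path ./prophetnet_old \
    #       --pytorch_dump_folder_path ./prophetnet_converted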
| 666 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = 42
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int = 6_5536 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : str = "fourier" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __SCREAMING_SNAKE_CASE : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __SCREAMING_SNAKE_CASE : Tuple[str] = "UNetMidBlock1D" , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Tuple[int] = (32, 32, 64) , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = False , ) -> List[str]:
super().__init__()
a_ : str = sample_size
# time
if time_embedding_type == "fourier":
a_ : List[str] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__SCREAMING_SNAKE_CASE , log=__SCREAMING_SNAKE_CASE , flip_sin_to_cos=__SCREAMING_SNAKE_CASE )
a_ : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
a_ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__SCREAMING_SNAKE_CASE , downscale_freq_shift=__SCREAMING_SNAKE_CASE )
a_ : str = block_out_channels[0]
if use_timestep_embedding:
a_ : str = block_out_channels[0] * 4
a_ : Any = TimestepEmbedding(
in_channels=__SCREAMING_SNAKE_CASE , time_embed_dim=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , out_dim=block_out_channels[0] , )
a_ : Dict = nn.ModuleList([] )
a_ : List[Any] = None
a_ : Optional[int] = nn.ModuleList([] )
a_ : Union[str, Any] = None
# down
a_ : str = in_channels
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
a_ : List[Any] = output_channel
a_ : Dict = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
a_ : Union[str, Any] = i == len(__SCREAMING_SNAKE_CASE ) - 1
a_ : Optional[Any] = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
a_ : Optional[Any] = get_mid_block(
__SCREAMING_SNAKE_CASE , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__SCREAMING_SNAKE_CASE , add_downsample=__SCREAMING_SNAKE_CASE , )
# up
a_ : str = list(reversed(__SCREAMING_SNAKE_CASE ) )
a_ : Union[str, Any] = reversed_block_out_channels[0]
if out_block_type is None:
a_ : List[str] = out_channels
else:
a_ : int = block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
a_ : Union[str, Any] = output_channel
a_ : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(__SCREAMING_SNAKE_CASE ) - 1 else final_upsample_channels
)
a_ : Any = i == len(__SCREAMING_SNAKE_CASE ) - 1
a_ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
a_ : str = output_channel
# out
a_ : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
a_ : int = get_out_block(
out_block_type=__SCREAMING_SNAKE_CASE , num_groups_out=__SCREAMING_SNAKE_CASE , embed_dim=block_out_channels[0] , out_channels=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, float, int] , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[UNetaDOutput, Tuple]:
a_ : List[str] = timestep
if not torch.is_tensor(__SCREAMING_SNAKE_CASE ):
a_ : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
a_ : Tuple = timesteps[None].to(sample.device )
a_ : Any = self.time_proj(__SCREAMING_SNAKE_CASE )
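        # `time_proj` turns scalar timesteps into sinusoidal (or Gaussian Fourier)
        # embeddings; with `use_timestep_embedding` they are refined by an MLP,
        # otherwise they are broadcast along the length axis of the sample below.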
if self.config.use_timestep_embedding:
a_ : List[str] = self.time_mlp(__SCREAMING_SNAKE_CASE )
else:
a_ : Any = timestep_embed[..., None]
a_ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
a_ : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
a_ : Tuple = ()
for downsample_block in self.down_blocks:
a_ , a_ : Any = downsample_block(hidden_states=__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
a_ : Tuple = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
a_ : Optional[Any] = down_block_res_samples[-1:]
a_ : Any = down_block_res_samples[:-1]
a_ : Optional[Any] = upsample_block(__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE )
# 5. post-process
if self.out_block:
a_ : List[Any] = self.out_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__SCREAMING_SNAKE_CASE )
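# Hypothetical usage sketch (not part of the original file): a 1D UNet like the
# one above denoises a (batch, channels, length) tensor conditioned on a
# diffusion timestep, roughly:
#   out = model(torch.randn(1, 2, 65536), 10).sample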
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["input_features", "attention_mask"]
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_6000 , __SCREAMING_SNAKE_CASE : Dict=80 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[Any]=True , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Tuple:
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = num_mel_bins
a_ : str = do_ceptral_normalize
a_ : int = normalize_means
a_ : Optional[int] = normalize_vars
a_ : Dict = True
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : np.ndarray , ) -> np.ndarray:
a_ : str = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
a_ : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
a_ : List[Any] = ta_kaldi.fbank(__SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : float = 0.0 , ) -> np.ndarray:
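        # Utterance-level cepstral mean and variance normalization (CMVN): subtract
        # the per-feature mean and divide by the per-feature standard deviation,
        # both computed over the non-padded frames of this utterance only.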
# make sure we normalize float32 arrays
if normalize_means:
a_ : Any = x[:input_length].mean(axis=0 )
a_ : Any = np.subtract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if normalize_vars:
a_ : Dict = x[:input_length].std(axis=0 )
a_ : List[Any] = np.divide(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
a_ : Any = padding_value
# make sure array is in float32
a_ : Union[str, Any] = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None ) -> List[np.ndarray]:
a_ : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
a_ : Dict = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
a_ : int = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ : Dict = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
a_ : Optional[int] = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ : List[Any] = [raw_speech]
# extract fbank features
a_ : int = [self._extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
a_ : Dict = BatchFeature({'''input_features''': features} )
a_ : int = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# make sure list is in array format
a_ : Optional[Any] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
a_ : str = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
a_ : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
a_ : str = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
a_ : List[str] = (
np.array(__SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a_ : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__SCREAMING_SNAKE_CASE )
if return_tensors is not None:
a_ : Optional[int] = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _UpperCAmelCase ( __A : Any ):
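    # fairseq bookkeeping keys with no HF counterpart; drop them from the state dict in place.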
a_ : Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _UpperCAmelCase ( __A : Union[str, Any] ):
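    # Build a bias-free Linear layer that reuses the embedding matrix (weight tying for the LM head).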
a_ , a_ : str = emb.weight.shape
a_ : Any = nn.Linear(__A , __A , bias=__A )
a_ : List[Any] = emb.weight.data
return lin_layer
def _UpperCAmelCase ( __A : List[Any] ):
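    # Load the fairseq checkpoint on CPU; training args live under 'args' (older checkpoints) or 'cfg'/'model'.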
a_ : int = torch.load(__A , map_location='''cpu''' )
a_ : Tuple = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
a_ : List[str] = mam_aaa['''model''']
remove_ignore_keys_(__A )
a_ : Union[str, Any] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
a_ : List[str] = MaMaaaConfig(
vocab_size=__A , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
a_ : List[Any] = state_dict['''decoder.embed_tokens.weight''']
a_ : List[str] = MaMaaaForConditionalGeneration(__A )
model.model.load_state_dict(__A , strict=__A )
a_ : Tuple = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase = parser.parse_args()
    __lowerCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
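    # Build the (original_name, hf_name) pairs that map DETA/Swin parameters onto the HF layout.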
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _UpperCAmelCase ( __A : str , __A : int , __A : Tuple ):
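    # Pop the tensor stored under the old key and re-insert it under the renamed key.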
a_ : str = dct.pop(__A )
a_ : Dict = val
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
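    # The Swin backbone stores the attention projections as one fused qkv matrix; split it into separate q, k, v tensors.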
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
a_ : List[Any] = in_proj_bias[: dim]
a_ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
a_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
a_ : Optional[int] = in_proj_weight[
-dim :, :
]
a_ : int = in_proj_bias[-dim :]
# fmt: on
def _UpperCAmelCase ( __A : Dict , __A : Dict ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
a_ : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __A : int , __A : int , __A : Any ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
    # print the original state dict (name, shape) for inspection
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on an image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[float] ):
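    # Equivalent resistance of resistors in parallel: 1 / R_eq = 1/R_1 + 1/R_2 + ...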
a_ : Dict = 0.00
a_ : Union[str, Any] = 0
for resistor in resistors:
if resistor <= 0:
a_ : str = f'Resistor at index {index} has a negative or zero value!'
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCAmelCase ( __A : list[float] ):
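    # Equivalent resistance of resistors in series: R_eq = R_1 + R_2 + ...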
a_ : List[Any] = 0.00
a_ : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
a_ : int = f'Resistor at index {index} has a negative value!'
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 666 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["input_values", "padding_mask"]
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = 2_4000 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : float = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]:
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Tuple = chunk_length_s
a_ : Tuple = overlap
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
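        # Hop between consecutive chunks: keep (1 - overlap) of a chunk, and never less than one sample.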
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Any , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str, PaddingStrategy]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
a_ : Optional[Any] = True
a_ : Tuple = bool(
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
a_ : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
a_ : Tuple = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
a_ : Union[str, Any] = [np.asarray(__SCREAMING_SNAKE_CASE ).T]
# verify inputs are valid
for idx, example in enumerate(__SCREAMING_SNAKE_CASE ):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' )
a_ : Union[str, Any] = None
a_ : Union[str, Any] = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
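                # snap the shortest example down to a whole number of hops, then extend to the end of the last chunk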
a_ : Optional[int] = min(array.shape[0] for array in raw_audio )
a_ : Optional[Any] = int(np.floor(max_length / self.chunk_stride ) )
a_ : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
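                # round the longest example up to a whole number of hops, then extend to the end of the last chunk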
a_ : List[Any] = max(array.shape[0] for array in raw_audio )
a_ : Optional[int] = int(np.ceil(max_length / self.chunk_stride ) )
a_ : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
a_ : List[Any] = '''max_length'''
else:
a_ : Optional[int] = input_values
# normal padding on batch
if padded_inputs is None:
a_ : Optional[int] = self.pad(
__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
if padding:
a_ : Tuple = padded_inputs.pop('''attention_mask''' )
a_ : List[str] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
a_ : List[str] = example[..., None]
input_values.append(example.T )
a_ : List[Any] = input_values
if return_tensors is not None:
a_ : int = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
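        # Two-document dummy dataset with a flat FAISS inner-product index over constant embeddings.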
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
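        # Build a retriever over a custom HF index, either kept in memory or saved to disk and reloaded.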
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for the doc-token-related keys in the dictionary.
| 666 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=30 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Tuple=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : str=None , ) -> Union[str, Any]:
a_ : Tuple = parent
a_ : List[Any] = batch_size
a_ : List[Any] = image_size
a_ : int = patch_size
a_ : Dict = num_channels
a_ : List[str] = is_training
a_ : int = use_labels
a_ : int = hidden_size
a_ : List[str] = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : Union[str, Any] = intermediate_size
a_ : List[Any] = hidden_act
a_ : str = hidden_dropout_prob
a_ : List[Any] = attention_probs_dropout_prob
a_ : Optional[int] = type_sequence_label_size
a_ : int = initializer_range
a_ : Union[str, Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ : List[Any] = (image_size // patch_size) ** 2
a_ : Any = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Optional[int] = None
if self.use_labels:
a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
a_ : Tuple = ViTMSNModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
a_ : Tuple = self.type_sequence_label_size
a_ : str = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
print(f'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print(f'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : str = 1
a_ : Any = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : str = self.prepare_config_and_inputs()
a_ , a_ , a_ : Tuple = config_and_inputs
a_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
snake_case__ = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : Any = ViTMSNModelTester(self )
a_ : List[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : int = [*signature.parameters.keys()]
a_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[Any] = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ):
a_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(2 )
a_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = self.default_image_processor
a_ : Optional[Any] = prepare_img()
a_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
a_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
a_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
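# total surface area = base area (pi * r^2) plus lateral area
# (pi * r * slant_height), with slant_height = sqrt(h^2 + r^2)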
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
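# surface area of a ring torus: 4 * pi^2 * R * r, where R is the distance
# from the tube centre to the torus centre and r is the tube radius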
return 4 * pow(__A , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
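# Heron's formula: with semi-perimeter s = (a + b + c) / 2, the area is
# sqrt(s * (s - a) * (s - b) * (s - c))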
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _UpperCAmelCase ( __A : dict ):
return (data["data"], data["target"])
def _UpperCAmelCase ( __A : np.ndarray , __A : np.ndarray ):
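# fit an XGBoost gradient-boosted tree classifier (default hyperparameters)
# on the given feature matrix and target vector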
a_ : Optional[int] = XGBClassifier()
classifier.fit(__A , __A )
return classifier
def _UpperCAmelCase ( ):
a_ : Any = load_iris()
a_ , a_ : List[str] = data_handling(__A )
a_ , a_ , a_ , a_ : Tuple = train_test_split(
__A , __A , test_size=0.25 )
a_ : List[str] = iris['''target_names''']
# Create an XGBoost Classifier from the training data
a_ : Optional[Any] = xgboost(__A , __A )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__A , __A , __A , display_labels=__A , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
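# the mps backend does not support device-bound generators, so fall back to
# the global torch.manual_seed; other devices get a device-seeded
# torch.Generator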
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar('T')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : bool = True ) -> None:
a_ : dict[T, list[T]] = {} # dictionary of lists
a_ : Tuple = directed
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__SCREAMING_SNAKE_CASE )
self.adj_list[destination_vertex].append(__SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__SCREAMING_SNAKE_CASE )
a_ : int = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
a_ : Optional[Any] = [destination_vertex]
a_ : int = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__SCREAMING_SNAKE_CASE )
a_ : int = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a_ : Tuple = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
a_ : int = [destination_vertex]
a_ : Optional[int] = []
return self
def __repr__( self : Tuple ) -> str:
return pformat(self.adj_list )
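# Minimal usage sketch. The identifiers below are assumptions, since the
# names in this file are obfuscated: `GraphAdjacencyList` and `add_edge`
# mirror the names suggested by the type annotations above.
# graph = GraphAdjacencyList[int](directed=False)
# graph.add_edge(1, 2).add_edge(2, 3) # the method returns self, so calls chain
# print(graph) # -> {1: [2], 2: [1, 3], 3: [2]}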
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
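# collect the rows of each requested partition in order and pair every row
# with its expected "<partition>_<row index>" id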
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : list ):
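# Heap's algorithm: produce every permutation of the input by performing a
# single swap per step, driven by the counter array c below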
if len(__A ) <= 1:
return [tuple(__A )]
a_ : Optional[int] = []
def generate(__A : int , __A : list ):
a_ : Tuple = [0] * n
res.append(tuple(__A ) )
a_ : int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
a_ , a_ : Optional[Any] = arr[i], arr[0]
else:
a_ , a_ : int = arr[i], arr[c[i]]
res.append(tuple(__A ) )
c[i] += 1
a_ : List[Any] = 0
else:
a_ : List[Any] = 0
i += 1
generate(len(__A ) , __A )
return res
if __name__ == "__main__":
__lowerCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the inputs in the way they appear in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
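# BLOOM fuses the batch and head dimensions: keys are laid out as
# (batch * n_head, head_dim, past_len) and values as
# (batch * n_head, past_len, head_dim)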
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def _UpperCAmelCase ( __A : list[int] , __A : int , __A : int = 0 , __A : int = -1 ):
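# leftmost insertion point: first index in sorted_collection[lo:hi] whose
# value is >= item (mirrors bisect.bisect_left)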
if hi < 0:
a_ : Optional[Any] = len(__A )
while lo < hi:
a_ : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
a_ : List[str] = mid + 1
else:
a_ : List[str] = mid
return lo
def _UpperCAmelCase ( __A : list[int] , __A : int , __A : int = 0 , __A : int = -1 ):
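# rightmost insertion point: first index whose value is > item (mirrors
# bisect.bisect_right)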
if hi < 0:
a_ : int = len(__A )
while lo < hi:
a_ : int = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
a_ : List[str] = mid + 1
else:
a_ : Dict = mid
return lo
def _UpperCAmelCase ( __A : list[int] , __A : int , __A : int = 0 , __A : int = -1 ):
sorted_collection.insert(bisect_left(__A , __A , __A , __A ) , __A )
def _UpperCAmelCase ( __A : list[int] , __A : int , __A : int = 0 , __A : int = -1 ):
sorted_collection.insert(bisect_right(__A , __A , __A , __A ) , __A )
def _UpperCAmelCase ( __A : list[int] , __A : int ):
a_ : Any = 0
a_ : int = len(__A ) - 1
while left <= right:
a_ : Optional[Any] = left + (right - left) // 2
a_ : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
a_ : str = midpoint - 1
else:
a_ : str = midpoint + 1
return None
def _UpperCAmelCase ( __A : list[int] , __A : int ):
a_ : Optional[int] = bisect.bisect_left(__A , __A )
if index != len(__A ) and sorted_collection[index] == item:
return index
return None
def _UpperCAmelCase ( __A : list[int] , __A : int , __A : int , __A : int ):
if right < left:
return None
a_ : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__A , __A , __A , midpoint - 1 )
else:
return binary_search_by_recursion(__A , __A , midpoint + 1 , __A )
if __name__ == "__main__":
__lowerCAmelCase = input('Enter numbers separated by comma:\n').strip()
__lowerCAmelCase = sorted(int(item) for item in user_input.split(','))
__lowerCAmelCase = int(input('Enter a single number to be found in the list:\n'))
__lowerCAmelCase = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
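# slide a 13-digit window through the string: extend it one digit at a time
# while the incoming digit is at least the outgoing head digit, otherwise
# record the window's product and jump ahead to a fresh window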
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : int , __A : str ):
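# Russian peasant multiplication: whenever the lowest bit of b is set, add
# the current a to the result, then double a and halve b (O(log b) steps)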
a_ : Any = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( __A : Any , __A : Tuple , __A : Dict ):
a_ : str = 0
while b > 0:
if b & 1:
a_ : Dict = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
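# Minimal usage sketch (the two helpers above are obfuscated; they are
# called multiply and multiply_mod here purely for illustration):
# multiply(3, 4) # -> 12
# multiply_mod(3, 4, 5) # -> (3 * 4) % 5 == 2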
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
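# divide and conquer: inspect the middle three elements; if the middle one
# dominates both neighbours it is a peak, otherwise recurse into the half
# that is still rising, giving O(log n) comparisons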
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "openai/whisper-base"
snake_case__ = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
snake_case__ = "transcriber"
snake_case__ = WhisperProcessor
snake_case__ = WhisperForConditionalGeneration
snake_case__ = ["audio"]
snake_case__ = ["text"]
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Any ) -> int:
return self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
return self.model.generate(inputs=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
return self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0]
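# Minimal usage sketch (the tool class name above is obfuscated; assuming a
# PipelineTool-style call interface):
# tool = SpeechToTextTool() # hypothetical name for the class defined above
# transcript = tool("path/to/audio.wav")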
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
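# byte-level BPE renders a leading space as the 'Ġ' (\u0120) marker, so
# fetch that symbol from the tokenizer's byte encoder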
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the
# arguments `add_prefix_space` and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 666 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "xlm-roberta"
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_0522 , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Any=3072 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1e-12 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]="absolute" , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[Any]:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : int = vocab_size
a_ : Optional[Any] = hidden_size
a_ : Union[str, Any] = num_hidden_layers
a_ : Optional[int] = num_attention_heads
a_ : Tuple = hidden_act
a_ : Optional[int] = intermediate_size
a_ : Dict = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : List[Any] = max_position_embeddings
a_ : Union[str, Any] = type_vocab_size
a_ : Tuple = initializer_range
a_ : List[str] = layer_norm_eps
a_ : Tuple = position_embedding_type
a_ : Tuple = use_cache
a_ : Optional[int] = classifier_dropout
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a_ : str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
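        # Illustrative value of the property above for the default task (a sketch,
        # reading the code directly):
        #   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
        #                ("attention_mask", {0: "batch", 1: "sequence"})])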
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
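        # Illustrative layouts produced above (BARThez follows the RoBERTa-style format):
        #   single sequence:   <s> A </s>
        #   pair of sequences: <s> A </s></s> B </s>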
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 666 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def _UpperCAmelCase ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
a_ : str = cst_fwd.get(__A , np.inf )
a_ : List[str] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
a_ : Dict = new_cost_f
a_ : int = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
a_ : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _UpperCAmelCase ( __A : str , __A : str , __A : dict , __A : dict ):
a_ : int = -1
a_ : Optional[Any] = set()
a_ : Optional[Any] = set()
a_ : Any = {source: 0}
a_ : Dict = {destination: 0}
a_ : str = {source: None}
a_ : List[Any] = {destination: None}
a_ : PriorityQueue[Any] = PriorityQueue()
a_ : PriorityQueue[Any] = PriorityQueue()
a_ : Optional[Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
a_ , a_ : int = queue_forward.get()
visited_forward.add(__A )
a_ , a_ : Optional[int] = queue_backward.get()
visited_backward.add(__A )
a_ : Tuple = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
a_ : Any = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
a_ : Union[str, Any] = shortest_distance
return shortest_path_distance
__lowerCAmelCase = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
__lowerCAmelCase = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
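    # Illustrative run (a sketch; `bidirectional_dij`, `graph_fwd` and `graph_bwd`
    # are assumed names for the search function and the two adjacency maps above):
    #   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3, via E -> G -> F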
| 666 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str , __A : str ):
def get_matched_characters(__A : str , __A : str ) -> str:
a_ : Union[str, Any] = []
a_ : int = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a_ : Any = int(max(0 , i - limit ) )
a_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
a_ : Any = f'{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}'
return "".join(__A )
# matching characters
a_ : Optional[Any] = get_matched_characters(__A , __A )
a_ : int = get_matched_characters(__A , __A )
a_ : Any = len(__A )
# transposition
a_ : List[Any] = (
len([(ca, ca) for ca, ca in zip(__A , __A ) if ca != ca] ) // 2
)
if not match_count:
a_ : Dict = 0.0
else:
a_ : Optional[int] = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a_ : List[str] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
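    # For reference, the call above evaluates to 0.4666666666666666: the only
    # matched character is "l", so jaro = (1/5 + 1/5 + 1/1) / 3, and with no
    # common prefix the Winkler bonus adds nothing.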
| 666 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _UpperCAmelCase ( __A : Dict ):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def _UpperCAmelCase ( __A : Optional[int] ):
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
a_ : List[Any] = metric_id
class SCREAMING_SNAKE_CASE :
snake_case__ = [MetricMock(SCREAMING_SNAKE_CASE_ ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def _UpperCAmelCase ( __A : Any , __A : str , __A : Optional[Any] , __A : Dict , __A : Dict ):
if "tmp_path" in args:
a_ : int = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(__A , match='''https://huggingface.co/docs/evaluate''' ):
func(*__A )
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
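        # Shape sketch (assuming a query batch of size N and sequence length L):
        # `p_starts` and `p_ends` come out as (N, L) tensors of per-token
        # probabilities of being the start / end of an entity span.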
| 666 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = FlaxAutoencoderKL
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : Optional[Any] = 4
a_ : Optional[Any] = 3
a_ : Union[str, Any] = (32, 32)
a_ : int = jax.random.PRNGKey(0 )
a_ : Tuple = jax.random.uniform(__SCREAMING_SNAKE_CASE , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : Optional[int] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
a_ : Dict = self.dummy_input
return init_dict, inputs_dict
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
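    # Illustrative use (hypothetical dataset): for features with an Image column
    # "image" and a ClassLabel column "labels", the property above yields
    # {"image": "image", "labels": "labels"}.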
| 666 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = XLMTokenizer
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
a_ : Any = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Dict = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
a_ : int = '''lower newer'''
a_ : Optional[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : Dict = XLMTokenizer(self.vocab_file , self.merges_file )
a_ : str = '''lower'''
a_ : List[str] = ['''low''', '''er</w>''']
a_ : Dict = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokens + ['''<unk>''']
a_ : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
a_ : Optional[Any] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
a_ : str = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
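        # The assertions above encode XLM's sequence format: id 0 is <s> and id 1
        # is </s>, so a single sequence becomes "<s> A </s>" and a pair becomes
        # "<s> A </s> B </s>".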
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : tuple[int, int] , __A : int ):
a_ , a_ : List[str] = position
a_ : Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a_ : Any = []
for position in positions:
a_ , a_ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__A )
return permissible_positions
def _UpperCAmelCase ( __A : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCAmelCase ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ):
if is_complete(__A ):
return True
for position in get_valid_pos(__A , len(__A ) ):
a_ , a_ : Dict = position
if board[y][x] == 0:
a_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__A , __A , curr + 1 ):
return True
a_ : Tuple = 0
return False
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = [[0 for i in range(__A )] for j in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Optional[Any] = 1
if open_knight_tour_helper(__A , (i, j) , 1 ):
return board
a_ : Union[str, Any] = 0
    a_ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
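    # Illustrative behaviour (a sketch of the solver above): a 1x1 board yields
    # [[1]] and a 5x5 board yields a complete open tour, while sizes 2-4 raise
    # ValueError because no open knight's tour exists on those boards.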
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : int ):
a_ : str = int(__A )
if decimal in (0, 1): # Exit cases for the recursion
return str(__A )
a_ , a_ : Union[str, Any] = divmod(__A , 2 )
return binary_recursive(__A ) + str(__A )
def _UpperCAmelCase ( __A : str ):
a_ : Optional[int] = str(__A ).strip()
if not number:
raise ValueError('''No input value was provided''' )
a_ : List[Any] = '''-''' if number.startswith('''-''' ) else ''''''
a_ : Tuple = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'{negative}0b{binary_recursive(int(__A ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
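    # Illustrative conversions through the string entry point above (a sketch):
    #   "21" -> "0b10101"
    #   "-8" -> "-0b1000"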
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCAmelCase ( __A : int ):
a_ : Any = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
a_ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
a_ : str = True if '''large''' in model_name or '''huge''' in model_name else False
a_ : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ : Any = [3, 3, 3, 3]
a_ : Union[str, Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ : Optional[Any] = [4, 4, 4, 4]
a_ : List[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ : List[Any] = [3, 3, 3, 3]
if "lrf" in model_name:
a_ : List[str] = [3, 3, 3, 3]
else:
a_ : int = [2, 2, 2, 2]
if "tiny" in model_name:
a_ : Any = 96
elif "small" in model_name:
a_ : int = 96
elif "base" in model_name:
a_ : Any = 1_28
elif "large" in model_name:
a_ : Union[str, Any] = 1_92
elif "xlarge" in model_name:
a_ : Optional[Any] = 2_56
elif "huge" in model_name:
a_ : Optional[int] = 3_52
# set label information
a_ : Any = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
a_ : Tuple = '''imagenet-22k-id2label.json'''
else:
a_ : Dict = '''imagenet-1k-id2label.json'''
a_ : Optional[int] = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
a_ : Optional[Any] = {int(__A ): v for k, v in idalabel.items()}
a_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
a_ : List[str] = FocalNetConfig(
embed_dim=__A , depths=__A , focal_levels=__A , focal_windows=__A , use_conv_embed=__A , idalabel=__A , labelaid=__A , use_post_layernorm=__A , use_layerscale=__A , )
return config
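# Illustrative output of the helper above: model_name = "focalnet-tiny" yields a
# config with depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[2, 2, 2, 2],
# focal_windows=[3, 3, 3, 3] and ImageNet-1k label mappings.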
def _UpperCAmelCase ( __A : Tuple ):
if "patch_embed.proj" in name:
a_ : Dict = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
a_ : Optional[int] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
a_ : Tuple = '''encoder.''' + name
if "encoder.layers" in name:
a_ : str = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
a_ : Optional[Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
a_ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ : Optional[Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ : int = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
a_ : Dict = '''layernorm.weight'''
if name == "norm.bias":
a_ : List[str] = '''layernorm.bias'''
if "head" in name:
a_ : List[Any] = name.replace('''head''' , '''classifier''' )
else:
a_ : Tuple = '''focalnet.''' + name
return name
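# Illustrative renames performed by the helper above (hypothetical keys):
#   "patch_embed.proj.weight" -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   "norm.weight"             -> "focalnet.layernorm.weight"
#   "head.weight"             -> "classifier.weight"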
def _UpperCAmelCase ( __A : List[Any] , __A : Optional[Any] , __A : str=False ):
# fmt: off
a_ : Tuple = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
a_ : Any = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __A )
a_ : Optional[Any] = torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
a_ : Union[str, Any] = state_dict.pop(__A )
a_ : Optional[int] = val
a_ : Tuple = get_focalnet_config(__A )
a_ : str = FocalNetForImageClassification(__A )
model.eval()
# load state dict
model.load_state_dict(__A )
# verify conversion
a_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : Any = BitImageProcessor(
do_resize=__A , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__A , crop_size=2_24 , do_normalize=__A , image_mean=__A , image_std=__A , )
a_ : Optional[int] = Image.open(requests.get(__A , stream=__A ).raw )
a_ : Optional[Any] = processor(images=__A , return_tensors='''pt''' )
a_ : Union[str, Any] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ : int = image_transforms(__A ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __A , atol=1E-4 )
a_ : List[str] = model(**__A )
a_ : Optional[Any] = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ : List[Any] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ : List[Any] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ : Optional[Any] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ : Any = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ : Any = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(f'{model_name}' )
processor.push_to_hub(f'{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__lowerCAmelCase = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
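    # The third anchor inside Google Scholar's "gs_fl" result footer is normally
    # the "Cited by N" link, so the call above is expected to print a string such
    # as "Cited by 123" (the count is illustrative and changes over time).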
| 666 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = StableUnCLIPImgaImgPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ = frozenset([] )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : List[Any] = 32
a_ : Optional[int] = embedder_hidden_size
# image encoding components
a_ : str = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a_ : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a_ : str = StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE )
a_ : int = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
a_ : List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
a_ : Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
a_ : Optional[Any] = AutoencoderKL()
a_ : Tuple = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : Optional[int]=True ) -> List[str]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : List[str] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if pil_image:
a_ : List[str] = input_image * 0.5 + 0.5
a_ : List[Any] = input_image.clamp(0 , 1 )
a_ : str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a_ : Optional[Any] = DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
a_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : List[Any] = self.get_dummy_components()
a_ : Any = StableUnCLIPImgaImgPipeline(**__SCREAMING_SNAKE_CASE )
a_ : int = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
inputs.update({'''image_embeds''': None} )
a_ : List[Any] = sd_pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : List[str] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
a_ : int = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
a_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
a_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
a_ : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
a_ : Any = pipe(__SCREAMING_SNAKE_CASE , '''anime turle''' , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' )
a_ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
a_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
a_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
a_ : Tuple = pipe(__SCREAMING_SNAKE_CASE , '''anime turle''' , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' )
a_ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a_ : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
a_ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ : List[Any] = pipe(
__SCREAMING_SNAKE_CASE , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
a_ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
import functools
def _UpperCAmelCase ( __A : list[int] , __A : list[int] ):
# Validation
if not isinstance(__A , __A ) or not all(isinstance(__A , __A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(__A ) != 3 or not all(isinstance(__A , __A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(__A ) == 0:
return 0
if min(__A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(__A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
a_ : int = set(__A )
@functools.cache
def dynamic_programming(__A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
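    # Illustrative run (the classic instance of this problem): with
    # days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] the minimum spend is 11:
    # a 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20.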
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
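        # Worked example (hypothetical inputs): predictions=["cat", "Dog"] against
        # references=["cat", "dog"] gives 50.0 with the defaults and 100.0 with
        # ignore_case=True.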
| 666 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__lowerCAmelCase = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__lowerCAmelCase = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__lowerCAmelCase = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n    predictions: list of predictions to score (integers, or floats for the stsb subset).\n    references: list of ground truth labels (integers, or floats for the stsb subset).\nReturns: depending on the GLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "pearson": Pearson Correlation\n    "spearmanr": Spearman Correlation\n    "matthews_correlation": Matthews Correlation\nExamples:\n\n    >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n    >>> references = [0., 1., 2., 3., 4., 5.]\n    >>> predictions = [0., 1., 2., 3., 4., 5.]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n    {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n    >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'matthews_correlation\': 1.0}\n'
def _UpperCAmelCase ( __A : List[Any] , __A : Any ):
return float((preds == labels).mean() )
def _UpperCAmelCase ( __A : List[Any] , __A : List[str] ):
a_ : Union[str, Any] = simple_accuracy(__A , __A )
a_ : List[Any] = float(fa_score(y_true=__A , y_pred=__A ) )
return {
"accuracy": acc,
"f1": fa,
}
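# Quick sanity check for the helpers above (hypothetical values): preds = [0, 1, 0]
# against labels = [0, 1, 1] gives accuracy 2/3 and F1 2/3 (precision 1.0, recall 0.5).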
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Optional[int] = float(pearsonr(__A , __A )[0] )
a_ : Any = float(spearmanr(__A , __A )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name == "stsb":
return pearson_and_spearman(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
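    # Conditional variant of the dummy UNet, driven through cross-attention by a 10-dim encoding.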
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
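    # End-to-end smoke test covering three paths: unconditional generation (dict and tuple
    # returns), VQ-VAE + DDIM re-generation from raw audio, and encoding-conditioned generation.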
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
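        # Second pass: latent-space pipeline (VQ-VAE) with a DDIM scheduler, resuming from raw audio.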
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
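        # Third pass: conditional generation, feeding a random encoding to the cross-attention UNet.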
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
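# Lazy-import pattern: in the original module, sys.modules[__name__] is replaced by the
# _LazyModule so the torch-backed classes are only imported on first attribute access.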
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
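# Build (old_key, new_key) pairs mapping original DETA checkpoint names onto the
# Hugging Face DetaForObjectDetection state dict layout.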
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _UpperCAmelCase ( __A : str , __A : int , __A : Tuple ):
a_ : str = dct.pop(__A )
a_ : Dict = val
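# Split each fused Swin qkv projection into the separate query/key/value tensors
# expected by the Hugging Face backbone.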
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
a_ : List[Any] = in_proj_bias[: dim]
a_ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
a_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
a_ : Optional[int] = in_proj_weight[
-dim :, :
]
a_ : int = in_proj_bias[-dim :]
# fmt: on
def _UpperCAmelCase ( __A : Dict , __A : Dict ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
a_ : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
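# Download a test image (the standard COCO picture of two cats used across HF conversion scripts).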
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __A : int , __A : int , __A : Any ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
    # inspect the original state dict: print each parameter name and its shape
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on a test image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
    # verify logits and predicted boxes
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
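# Example invocation (script name and output path are illustrative):
#   python convert_deta.py --model_name deta-swin-large --pytorch_dump_folder_path ./deta-swin-large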
| 666 | 1 |