| code (stringlengths 86–54.5k) | code_codestyle (int64 0–371) | style_context (stringlengths 87–49.2k) | style_context_codestyle (int64 0–349) | label (int64 0–1) |
|---|---|---|---|---|
"""Dataset wrapper used for language-model distillation: sanity-checks, filters, and batches sequences of token ids."""

import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper around sequences of token ids and their lengths.

    Input:
    ------
        params: namespace-like parameters (must expose `max_model_input_size`, `mlm`,
            `is_master`, and a `special_tok_ids` dict)
        data: list of np.array[int] token-id sequences
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of at most max_model_input_size tokens."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
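
# Usage sketch (not part of the original file): `batch_sequences` is designed to be
# handed to a `DataLoader` as its `collate_fn`. The `params` object and
# `token_id_arrays` below are hypothetical placeholders carrying the attributes the
# class above assumes.
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:  # token_ids: (bs, max_seq_len), lengths: (bs,)
#         ...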
"""Tests for TF generation utilities (top-k/top-p filtering and `generate`)."""

from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # Create a fake model whose signature additionally accepts "foo"; generation should ignore it.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        # Encoder signature filtering only kicks in if the encoder does not accept wildcard kwargs.
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the ``torch.distributed`` communication package. During training, all
    workers initialize their own instance of the retriever, but only the main worker loads the index into memory.
    The index is stored in CPU memory, and the retriever also works in a non-distributed setup.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """
        Retriever initialization function, to be called from the training process. It sets up the retrieval-specific
        process group and, on the main process only, loads the index into memory. Retrieval-related communication
        happens on ``distributed_port + 1``.
        """
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """
        Retrieves documents for the specified ``question_hidden_states``. The main process, which has access to the
        index, gathers queries from all processes in the training group, performs the retrieval, and scatters the
        results back.
        """
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
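
# Call-order sketch (illustrative, not part of the original file): each training
# worker builds its own retriever instance; `init_retrieval` must run after
# `torch.distributed` is initialized and before any `retrieve` call. The model
# name, port, and `question_hidden_states` below are placeholders.
#
#     retriever = RagPyTorchDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#     )
#     retriever.init_retrieval(distributed_port=12345)
#     doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)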
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that there is no other
        # queen in the same column in the current board (possible_board), because that
        # would be a vertical collision. Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks hits, there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we call the DFS function again with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
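
# For reference (not in the original file): for n = 4 the search finds exactly two
# boards, corresponding to possible_board = [1, 3, 0, 2] and [2, 0, 3, 1]:
#
#     . Q . .        . . Q .
#     . . . Q        Q . . .
#     Q . . .        . . . Q
#     . . Q .        . Q . .
#
# followed by the line "2 solutions were found." (the program prints the boards
# one below the other rather than side by side).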
def base16_encode(data: bytes) -> str:
    """
    Encodes the given bytes into base16.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    >>> base16_encode(b'HELLO WORLD!')
    '48454C4C4F20574F524C4421'
    >>> base16_encode(b'')
    ''
    """
    # Turn the data into a list of integers (where each integer is a byte),
    # then turn each byte into its two-digit uppercase hexadecimal
    # representation, and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decodes the given base16-encoded string into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    >>> base16_decode('48454C4C4F20574F524C4421')
    b'HELLO WORLD!'
    >>> base16_decode('')
    b''
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
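
# Round-trip sketch (not part of the original module): encoding then decoding
# returns the original bytes, using the uppercase base16 alphabet from RFC 3548.
#
#     >>> base16_decode(base16_encode(b"Hello World!"))
#     b'Hello World!'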
"""Convert PyTorch checkpoints into Flax state dicts, and vice versa."""

import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


#####################
# PyTorch => Flax #
#####################


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
############################
# Sharded Pytorch => Flax #
############################


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
#####################
# Flax => PyTorch #
#####################


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
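
# Usage sketch (illustrative, not part of the original file): these helpers are
# normally driven indirectly via `from_pretrained(..., from_pt=True)` on a Flax
# model class, which converts the PyTorch checkpoint on the fly. The checkpoint
# name below is a placeholder.
#
#     from transformers import FlaxBertModel
#
#     model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)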
# Lint as: python3
"""Version utils."""

import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
"""Informer model configuration."""

from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of an `InformerModel`."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
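
# Usage sketch (not part of the original file; the hyperparameter values below are
# illustrative, not recommended defaults):
#
#     from transformers import InformerConfig, InformerModel
#
#     config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
#     model = InformerModel(config)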
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    >>> bin_to_octal("")
    Traceback (most recent call last):
        ...
    ValueError: Empty string was passed to the function
    >>> bin_to_octal("a-1")
    Traceback (most recent call last):
        ...
    ValueError: Non-binary value was passed to the function
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")

    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
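
# Worked example (not part of the original module): "1111" is left-padded to
# "001111" so its length is a multiple of 3, split into ["001", "111"], and each
# 3-bit group becomes one octal digit: 0b001 -> 1, 0b111 -> 7, giving "17".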
"""Google BLEU (aka GLEU) metric."""

from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Any ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def lowercase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] = 1 , __magic_name__ : Optional[int] = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 371 |
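The GLEU definition quoted in the description above (minimum of n-gram recall and precision) is implemented in nltk, which the metric's `corpus_gleu` call delegates to. A minimal usage sketch, assuming nltk is installed; the sentence pair is made up:

from nltk.translate import gleu_score

hyp = "the cat sat on the mat".split()
ref = "the cat is on the mat".split()
# sentence_gleu takes a list of reference token lists and one hypothesis
print(gleu_score.sentence_gleu([ref], hyp, min_len=1, max_len=4))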
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 0 |
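The row above is the lazy-import pattern used throughout the library: heavy submodules are only imported when one of their attributes is first requested. A minimal sketch of the same idea using PEP 562 module-level `__getattr__` (illustrative only, not the actual `_LazyModule` internals):

import importlib

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

def __getattr__(name):  # called only when normal module lookup fails
    for submodule, names in _import_structure.items():
        if name in names:
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")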
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class _A ( BackboneConfigMixin , PretrainedConfig ):
lowercase__: List[str] = "convnextv2"
    def __init__( self : List[Any] , num_channels : Any=3 , patch_size : List[str]=4 , num_stages : List[str]=4 , hidden_sizes : List[str]=None , depths : Any=None , hidden_act : Optional[Any]="gelu" , initializer_range : int=0.02 , layer_norm_eps : List[Any]=1E-12 , drop_path_rate : Optional[Any]=0.0 , image_size : Dict=2_24 , out_features : Tuple=None , out_indices : Union[str, Any]=None , **kwargs : Tuple , ) -> Optional[int]:
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 350 |
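The `stage_names` construction in the config above is easy to check by hand; a small sketch with the default depths (values taken from the snippet, the assert is illustrative):

depths = [3, 3, 9, 3]  # default used when `depths is None`
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]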
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 13 | 0 |
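The integration test above compares a slice of the output against hard-coded values with an absolute tolerance rather than exact equality, since kernels differ slightly across hardware. A tiny sketch of how that `atol` budget behaves (numbers are illustrative):

import numpy as np

expected = np.array([-0.0254, 0.0235, 0.1027])
actual = expected + 5e-5                                     # deviation below the budget
assert np.allclose(actual, expected, atol=1e-4)              # passes
assert not np.allclose(actual + 1e-3, expected, atol=1e-4)   # exceeds the budget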
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class _A ( PretrainedConfig ):
lowercase__: Dict = '''speech_to_text_2'''
lowercase__: List[Any] = ['''past_key_values''']
lowercase__: int = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : str , vocab_size : Tuple=1_00_00 , decoder_layers : Any=6 , decoder_ffn_dim : int=20_48 , decoder_attention_heads : Optional[Any]=4 , decoder_layerdrop : int=0.0 , use_cache : Dict=True , activation_function : Union[str, Any]="relu" , d_model : str=2_56 , dropout : Optional[int]=0.1 , attention_dropout : Optional[Any]=0.0 , activation_dropout : Optional[Any]=0.0 , init_std : Union[str, Any]=0.02 , decoder_start_token_id : Optional[int]=2 , scale_embedding : Union[str, Any]=True , pad_token_id : Union[str, Any]=1 , bos_token_id : Optional[Any]=0 , eos_token_id : Tuple=2 , max_target_positions : int=10_24 , **kwargs : Tuple , ) -> Tuple:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 351 |
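The `attribute_map` in the config above lets generic names like `num_attention_heads` resolve to decoder-specific attributes. A self-contained sketch of that aliasing idea (a simplified stand-in, not the actual PretrainedConfig machinery):

class TinyDecoderConfig:
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, decoder_attention_heads=4, d_model=256):
        self.decoder_attention_heads = decoder_attention_heads
        self.d_model = d_model

    def __getattr__(self, name):  # invoked only when normal lookup fails
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = TinyDecoderConfig()
assert cfg.num_attention_heads == 4 and cfg.hidden_size == 256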
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = image_size
__snake_case : Tuple = patch_size
__snake_case : str = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = coordinate_size
__snake_case : List[Any] = shape_size
__snake_case : Tuple = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Optional[Any] = scope
__snake_case : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : List[str] = text_seq_length
__snake_case : str = (image_size // patch_size) ** 2 + 1
__snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
__snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
__snake_case : List[str] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
__snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.num_labels
__snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
__snake_case : Tuple = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = 2
__snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__: Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__: Union[str, Any] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase__: Dict = False
lowercase__: int = False
lowercase__: Dict = False
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return True
def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict:
"""simple docstring"""
__snake_case : Any = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
__snake_case : Union[str, Any] = {
k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : str = TFLayoutLMvaModelTester(self )
        __snake_case : int = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
__snake_case : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = prepared_for_class.pop("""input_ids""" )
__snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__snake_case : str = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__snake_case : Dict = -1_00
__snake_case : str = tf.convert_to_tensor(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
__snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys()
__snake_case : Optional[Any] = inspect.signature(model.call ).parameters
__snake_case : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__snake_case : Union[str, Any] = {0: """input_ids"""}
for label_key in label_keys:
__snake_case : int = signature_names.index(__magic_name__ )
__snake_case : Optional[int] = label_key
__snake_case : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__snake_case : List[str] = prepared_for_class[value]
__snake_case : str = tuple(__magic_name__ )
# Send to model
__snake_case : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
        model = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""tf""" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 13 | 0 |
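The tester above legalizes random bounding boxes by swapping coordinates so that x0 <= x1 and y0 <= y1. The same invariant can be enforced without the double loop; a vectorized numpy sketch (shapes mirror the tester's `[batch, seq, 4]` boxes):

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 7, 4))
x0, y0, x1, y1 = (bbox[..., i] for i in range(4))
legal = np.stack(
    [np.minimum(x0, x1), np.minimum(y0, y1), np.maximum(x0, x1), np.maximum(y0, y1)],
    axis=-1,
)
assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()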
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew and\n      Dorr, Bonnie and\n      Schwartz, Rich and\n      Micciulla, Linnea and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?",\n        ...                "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       normalized=True,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?",\n        ...                "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def lowercase__ ( self : int , predictions : Optional[int] , references : Union[str, Any] , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ) -> Tuple:
        """simple docstring"""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 352 |
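The transpose in the compute method above turns per-prediction reference lists into the per-stream layout sacrebleu expects. A small worked example with made-up strings:

preds = ["hello there general kenobi", "foo bar foobar"]
refs = [["hello there general kenobi", "hi there general kenobi"],
        ["foo bar foobar", "foo bar foobar"]]
transposed = [[r[i] for r in refs] for i in range(len(refs[0]))]
assert transposed == [
    ["hello there general kenobi", "foo bar foobar"],   # reference stream 0
    ["hi there general kenobi", "foo bar foobar"],      # reference stream 1
]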
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester :
def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = patch_size
__snake_case : List[str] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = attention_type
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = scope
__snake_case : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__snake_case : str = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Any = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
# verify the logits shape
__snake_case : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__: List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: List[str] = False
lowercase__: List[Any] = False
lowercase__: Dict = False
lowercase__: int = False
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = TimesformerModelTester(self )
        __snake_case : List[Any] = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
        model = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 13 | 0 |
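The sequence-length bookkeeping in the TimeSformer tester above is plain arithmetic; with the tester defaults it works out as follows (a worked check, not extra test code):

image_size, patch_size, num_frames = 10, 2, 2            # tester defaults
num_patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * num_patches_per_frame + 1      # 51, including the CLS token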
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__UpperCamelCase = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__UpperCamelCase = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class _A ( PreTrainedTokenizerFast ):
lowercase__: Optional[Any] = VOCAB_FILES_NAMES
lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__: str = PRETRAINED_INIT_CONFIGURATION
lowercase__: Dict = RoFormerTokenizer
    def __init__( self : Tuple , vocab_file : Dict=None , tokenizer_file : Any=None , do_lower_case : List[str]=True , unk_token : Dict="[UNK]" , sep_token : Optional[Any]="[SEP]" , pad_token : int="[PAD]" , cls_token : Optional[int]="[CLS]" , mask_token : Optional[int]="[MASK]" , tokenize_chinese_chars : Optional[Any]=True , strip_accents : List[Any]=None , **kwargs : Dict , ) -> List[str]:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or pre_tok_state.get("""strip_accents""" , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""lowercase"""] = do_lower_case
            pre_tok_state["""strip_accents"""] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )
        self.do_lower_case = do_lower_case
def __getstate__( self : Any ) -> List[str]:
"""simple docstring"""
        state = self.__dict__.copy()
        state["""_tokenizer"""].pre_tokenizer = BertPreTokenizer()
return state
def __setstate__( self : Tuple , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
        self.__dict__ = __magic_name__
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def lowercase__ ( self : Any , token_ids_a : Dict , token_ids_b : List[str]=None ) -> Dict:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def lowercase__ ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> Any:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def lowercase__ ( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ) -> int:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def lowercase__ ( self : Any , save_directory : Any , legacy_format : List[str]=None , filename_prefix : List[str]=None , push_to_hub : Dict=False , **kwargs : Any , ) -> Optional[Any]:
        """simple docstring"""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 353 |
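The `__getstate__`/`__setstate__` pair above exists because the custom Jieba pre-tokenizer cannot be pickled, so it is swapped out before serialization and rebuilt afterwards. A generic sketch of that pattern (the class and members here are illustrative):

import pickle

class WithUnpicklableMember:
    def __init__(self):
        self.vocab = {"a": 0}
        self.pre_tokenizer = lambda s: s.split()   # lambdas cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop("pre_tokenizer")                 # drop the offending member
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.pre_tokenizer = lambda s: s.split()   # rebuild it after unpickling

restored = pickle.loads(pickle.dumps(WithUnpicklableMember()))
assert restored.pre_tokenizer("a b") == ["a", "b"]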
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
__UpperCamelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 354 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : str = 0
__snake_case : Optional[int] = len(_lowerCamelCase )
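    # Brute force: compare every pair (i, j) with i < j; O(n^2) comparisons.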
for i in range(n - 1 ):
for j in range(i + 1 , _lowerCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
if len(_lowerCamelCase ) <= 1:
return arr, 0
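    # Divide and conquer: count inversions within each half recursively, then count cross-half inversions while merging; O(n log n) overall.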
__snake_case : Any = len(_lowerCamelCase ) // 2
__snake_case : List[str] = arr[0:mid]
__snake_case : int = arr[mid:]
__snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase )
__snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase )
__snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase )
__snake_case : str = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
    __snake_case : Any = []
    __snake_case , __snake_case , __snake_case : List[str] = 0, 0, 0
while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
num_inversion += len(_lowerCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(_lowerCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
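# Example (halves are assumed sorted): p = [1, 3, 5], q = [2, 4] -> cross inversions (3, 2), (5, 2), (5, 4), so num_inversion = 3.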
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , _lowerCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__snake_case : Any = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , _lowerCamelCase )
# an empty list should also have zero inversions
__snake_case : List[Any] = []
__snake_case : List[Any] = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , _lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _A :
def __init__( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int]=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : List[Any]=99 , __magic_name__ : Any=32 , __magic_name__ : Dict=2 , __magic_name__ : List[str]=4 , __magic_name__ : str=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : int=0.02 , __magic_name__ : Any=3 , __magic_name__ : Any=4 , __magic_name__ : Dict=None , ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = parent
__snake_case : Optional[int] = 13
__snake_case : Optional[int] = 7
__snake_case : str = True
__snake_case : Dict = True
__snake_case : int = True
__snake_case : Dict = True
__snake_case : List[str] = 99
__snake_case : Optional[Any] = 32
__snake_case : str = 2
__snake_case : Optional[Any] = 4
__snake_case : Union[str, Any] = 37
__snake_case : Optional[Any] = """gelu"""
__snake_case : Any = 0.1
__snake_case : List[str] = 0.1
__snake_case : List[Any] = 5_12
__snake_case : Union[str, Any] = 16
__snake_case : Any = 2
__snake_case : Dict = 0.02
__snake_case : Union[str, Any] = 3
__snake_case : Any = 4
__snake_case : List[Any] = None
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[int] = None
if self.use_token_type_ids:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[Any] = None
__snake_case : Any = None
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = TFRoFormerModel(config=lowercase_ )
__snake_case : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Any = [input_ids, input_mask]
__snake_case : List[str] = model(lowercase_ )
__snake_case : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = True
__snake_case : int = TFRoFormerForCausalLM(config=lowercase_ )
__snake_case : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__snake_case : int = model(lowercase_ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> str:
"""simple docstring"""
__snake_case : str = TFRoFormerForMaskedLM(config=lowercase_ )
__snake_case : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__snake_case : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[Any] = TFRoFormerForSequenceClassification(config=lowercase_ )
__snake_case : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__snake_case : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.num_choices
__snake_case : int = TFRoFormerForMultipleChoice(config=lowercase_ )
__snake_case : Tuple = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
__snake_case : List[str] = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
__snake_case : Any = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
__snake_case : List[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__snake_case : List[str] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = self.num_labels
__snake_case : Optional[int] = TFRoFormerForTokenClassification(config=lowercase_ )
__snake_case : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__snake_case : str = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = TFRoFormerForQuestionAnswering(config=lowercase_ )
__snake_case : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__snake_case : Any = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowercase__: List[Any] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__: Union[str, Any] = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__: Union[str, Any] = False
lowercase__: str = False
def lowercase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] ) -> str:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Dict = TFRoFormerModelTester(self )
__snake_case : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : str = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(lowercase_ )
@require_tf
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__snake_case : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__snake_case : str = model(lowercase_ )[0]
# TODO Replace vocab size
__snake_case : Dict = 5_00_00
__snake_case : Any = [1, 6, vocab_size]
self.assertEqual(output.shape , lowercase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__snake_case : str = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
@require_tf
class _A ( unittest.TestCase ):
lowercase__: List[str] = 1e-4
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = tf.constant([[4, 10]] )
__snake_case : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__snake_case : Tuple = emba(input_ids.shape )
__snake_case : List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__snake_case : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
__snake_case : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
@require_tf
class _A ( unittest.TestCase ):
lowercase__: int = 1e-4
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__snake_case : str = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__snake_case : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__snake_case : Optional[Any] = embed_positions([2, 16, 7_68] )[None, None, :, :]
__snake_case , __snake_case : str = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase_ , lowercase_ , lowercase_ )
__snake_case : List[str] = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__snake_case : Optional[int] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
| 355 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
__snake_case : Tuple = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__snake_case : int = 1
if upper_limit > 0:
__snake_case : Optional[Any] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
__UpperCamelCase = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 356 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( __lowercase , unittest.TestCase ):
lowercase__: List[Any] = CanineTokenizer
lowercase__: Optional[int] = False
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
__snake_case : Dict = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
__snake_case : Optional[Any] = 10_24
return tokenizer
@require_torch
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.canine_tokenizer
__snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Any = self.canine_tokenizer
__snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.canine_tokenizer
__snake_case : Optional[Any] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__snake_case : Any = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : str = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
__snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Optional[int] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__snake_case : List[Any] = chr(0xE007 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
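                # 0xE005 is a codepoint in Unicode's Private Use Area (U+E000-U+F8FF), the range Canine uses for its special tokens.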
__snake_case : Tuple = 0xE005
__snake_case : Tuple = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
__snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
__snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
__snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Dict = chr(0xE005 )
__snake_case : str = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__snake_case : Tuple = tokenizer.tokenize(__magic_name__ )
__snake_case : Any = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
                self.assertEqual(token_a[0] , __magic_name__ )
                self.assertEqual(token_b[0] , __magic_name__ )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__snake_case : Optional[Any] = 0xE006
__snake_case : List[str] = chr(__magic_name__ )
__snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Any = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Tuple = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE006
__snake_case : int = chr(__magic_name__ )
__snake_case : List[Any] = [new_token_a]
__snake_case : Union[str, Any] = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__snake_case : Any = 0xE007
__snake_case : Any = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
__snake_case : Union[str, Any] = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[str] = """hello world"""
if self.space_between_special_tokens:
__snake_case : Union[str, Any] = """[CLS] hello world [SEP]"""
else:
__snake_case : List[Any] = input
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__snake_case : Dict = """a"""
__snake_case : Tuple = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
__snake_case : Dict = 0xE006
__snake_case : str = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
| 13 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''▁'''
__UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__UpperCamelCase = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__UpperCamelCase = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
__UpperCamelCase = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__UpperCamelCase = {'''mustc''': MUSTC_LANGS}
class _A ( a_ ):
lowercase__: Optional[Any] = VOCAB_FILES_NAMES
lowercase__: Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Dict = MAX_MODEL_INPUT_SIZES
lowercase__: Any = ['''input_ids''', '''attention_mask''']
lowercase__: Dict = []
def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : Optional[Any]="</s>" , __magic_name__ : Dict="<pad>" , __magic_name__ : Any="<unk>" , __magic_name__ : Optional[int]=False , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , do_upper_case=lowercase_ , do_lower_case=lowercase_ , tgt_lang=lowercase_ , lang_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
__snake_case : Optional[int] = do_upper_case
__snake_case : Dict = do_lower_case
__snake_case : Dict = load_json(lowercase_ )
__snake_case : Union[str, Any] = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : List[str] = load_spm(lowercase_ , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : Optional[Any] = LANGUAGES[lang_codes]
__snake_case : int = [f'''<lang:{lang}>''' for lang in self.langs]
__snake_case : Tuple = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
__snake_case : List[str] = self.lang_tokens
__snake_case : Tuple = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[Any] = {}
@property
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
return len(self.encoder )
@property
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def lowercase__ ( self : str , __magic_name__ : int ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = new_tgt_lang
self.set_tgt_lang_special_tokens(lowercase_ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = self.lang_code_to_id[tgt_lang]
__snake_case : Union[str, Any] = [lang_code_id]
def lowercase__ ( self : Dict , __magic_name__ : str ) -> List[Any]:
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def lowercase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(lowercase_ , self.encoder[self.unk_token] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int ) -> int:
"""simple docstring"""
return self.decoder.get(lowercase_ , self.unk_token )
def lowercase__ ( self : str , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = []
__snake_case : Union[str, Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Optional[int] = self.sp_model.decode(lowercase_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : int = []
else:
current_sub_tokens.append(lowercase_ )
__snake_case : Dict = self.sp_model.decode(lowercase_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
def lowercase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> Optional[Any]:
"""simple docstring"""
if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_b=lowercase_ , already_has_special_tokens=lowercase_ )
__snake_case : int = [1] * len(self.prefix_tokens )
__snake_case : List[Any] = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self : List[str] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : List[str] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = Path(lowercase_ )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__snake_case : List[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , lowercase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase_ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase_ , """wb""" ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (str(lowercase_ ), str(lowercase_ ))
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**A_ )
spm.Load(str(A_ ) )
return spm
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
with open(A_ , """r""" ) as f:
return json.load(A_ )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
with open(A_ , """w""" ) as f:
json.dump(A_ , A_ , indent=2 )
| 357 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__UpperCamelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _A :
def __init__( self : int , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=16 , __magic_name__ : Dict=13 , __magic_name__ : Optional[Any]=7 , __magic_name__ : Optional[Any]=14 , __magic_name__ : Optional[int]=10 , __magic_name__ : int=19 , __magic_name__ : Tuple=5 , __magic_name__ : List[str]=4 , __magic_name__ : Tuple=True , __magic_name__ : int=16 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Any=4 , __magic_name__ : str=4 , __magic_name__ : Tuple="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Optional[Any]=[1, 2, 3, 4, 5] , __magic_name__ : Optional[int]=25 , __magic_name__ : List[Any]=5 , ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = d_model
__snake_case : str = parent
__snake_case : int = batch_size
__snake_case : Optional[Any] = prediction_length
__snake_case : Tuple = context_length
__snake_case : Tuple = cardinality
__snake_case : List[Any] = num_time_features
__snake_case : Optional[int] = lags_sequence
__snake_case : List[str] = embedding_dimension
__snake_case : List[str] = is_training
__snake_case : Any = hidden_size
__snake_case : List[Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Any = context_length
__snake_case : Optional[int] = prediction_length + label_length
__snake_case : Tuple = label_length
__snake_case : List[Any] = moving_average
__snake_case : Union[str, Any] = autocorrelation_factor
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase__ ( self : Tuple , __magic_name__ : Any ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = config.context_length + max(config.lags_sequence )
__snake_case : List[str] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__snake_case : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, _past_length] )
__snake_case : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__snake_case : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__snake_case : int = floats_tensor([self.batch_size, config.prediction_length] )
__snake_case : List[Any] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = self.get_config()
__snake_case : Any = self.prepare_autoformer_inputs_dict(__A )
return config, inputs_dict
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoformerModel(config=__A ).to(__A ).eval()
__snake_case : Optional[int] = model(**__A )
__snake_case : str = outputs.encoder_last_hidden_state
__snake_case : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = model.get_encoder()
encoder.save_pretrained(__A )
__snake_case : Tuple = AutoformerEncoder.from_pretrained(__A ).to(__A )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = model.create_network_inputs(**__A )
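        # Autoformer first decomposes the context window into seasonal and trend components (series decomposition) before encoding.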
__snake_case , __snake_case : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__snake_case : Any = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__snake_case : Optional[int] = encoder(inputs_embeds=__A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__snake_case : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__snake_case : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__snake_case : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__snake_case : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = model.get_decoder()
decoder.save_pretrained(__A )
__snake_case : Optional[int] = AutoformerDecoder.from_pretrained(__A ).to(__A )
__snake_case : str = decoder(
trend=__A , inputs_embeds=__A , encoder_hidden_states=__A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowercase__: Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
lowercase__: str = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowercase__: Optional[int] = False
lowercase__: List[str] = False
lowercase__: Union[str, Any] = False
lowercase__: int = False
lowercase__: str = False
lowercase__: Optional[Any] = False
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = AutoformerModelTester(self )
__snake_case : int = ConfigTester(self , config_class=__A , has_text_modality=__A )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
__snake_case , __snake_case : Any = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["""missing_keys"""] , [] )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = inspect.signature(getattr(__A , """forward""" ) )
# The main input is the name of the argument after `self`
__snake_case : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __A )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__A )
__snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : str = [*signature.parameters.keys()]
__snake_case : Optional[int] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__A )] , __A )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = True
__snake_case : Optional[int] = getattr(self.model_tester , """seq_length""" , __A )
__snake_case : Any = getattr(self.model_tester , """decoder_seq_length""" , __A )
__snake_case : str = getattr(self.model_tester , """encoder_seq_length""" , __A )
__snake_case : Any = getattr(self.model_tester , """d_model""" , __A )
__snake_case : List[str] = getattr(self.model_tester , """num_attention_heads""" , __A )
__snake_case : Dict = d_model // num_attention_heads
for model_class in self.all_model_classes:
__snake_case : Any = True
__snake_case : Dict = False
__snake_case : Optional[int] = True
__snake_case : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__A , __A ) )
__snake_case : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : List[str] = True
__snake_case : str = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__A , __A ) )
__snake_case : Tuple = outputs.encoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__snake_case : Any = len(__A )
__snake_case : List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__A , __A )
# decoder attentions
__snake_case : int = outputs.decoder_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__snake_case : Any = outputs.cross_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__snake_case : str = True
__snake_case : Tuple = True
__snake_case : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 2 , len(__A ) )
__snake_case : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """simple docstring"""
    # `torch_device` comes from transformers.testing_utils, imported at the top of this test module
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        """simple docstring"""
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        # the original test compares against a module-level TOLERANCE constant (1e-4)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")

        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )

        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
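# Post-processing note (illustrative addition, not part of the original test file):
# `generate` returns samples of shape (batch, num_parallel_samples, prediction_length);
# a point forecast is usually the mean over the sample dimension, exactly as above:
#
#   mean_prediction = outputs.sequences.mean(dim=1)  # -> (batch, prediction_length)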
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
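# --- Usage sketch (illustrative, not part of the original module) ---
# Exercising the ONNX config above; the checkpoint name and flow follow the
# usual transformers ONNX export pattern, but treat this as a sketch rather
# than the canonical export recipe:
#
#   from transformers import AutoTokenizer
#
#   config = CodeGenConfig()
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=False)
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   print(sorted(dummy.keys()))  # ["attention_mask", "input_ids"]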
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
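# Illustrative note (not part of the original __init__.py): once the lazy module
# is installed in sys.modules, the heavy submodules are only imported on first
# attribute access, e.g.:
#
#   from transformers.models.gpt_neo import GPTNeoConfig  # pulls in configuration_gpt_neo only
#   config = GPTNeoConfig(num_layers=4)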
| 359 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32

    @property
    def block_out_channels_0(self):
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100
@property
    def dummy_tokenizer(self):
"""simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
return tokenizer
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
        config = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet(self):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 13 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = 0
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
__snake_case : int = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[Any] = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
__snake_case : Any = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Dict = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
__snake_case : Any = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
__snake_case : Any = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = CLIPConfig()
            # Create a dummy config file with image_processor_type
__snake_case : Any = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
__snake_case : List[Any] = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
__snake_case : Tuple = CLIPImageProcessor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
__snake_case : Any = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
__snake_case : int = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Tuple = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
__snake_case : Dict = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ):
__snake_case : List[str] = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__snake_case : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
__snake_case : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
__snake_case : Tuple = AutoImageProcessor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
__snake_case : int = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
__snake_case : Tuple = CustomImageProcessor.from_pretrained(lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
class _A ( __lowercase ):
lowercase__: str = True
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# If remote code is not set, the default is to use local
__snake_case : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__snake_case : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 360 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
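# --- Usage sketch (illustrative, not part of the original file) ---
# BART wraps a single sequence as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`;
# with the standard BART vocab, bos=0 and eos=2 (the token ids below are just
# example values):
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   tok.build_inputs_with_special_tokens([713, 16])  # -> [0, 713, 16, 2]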
| 13 | 0 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        """simple docstring"""
        super().setUp()

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)
    def get_chinese_input_output_texts(self):
        """simple docstring"""
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    def test_rust_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
pass
| 361 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
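if __name__ == "__main__":
    # Illustrative CLI wrapper (added for clarity; not in the original script).
    # Pass the path of an existing ONNX file, e.g. `python dedup.py model.onnx`.
    import sys

    print("optimized model written to:", remove_dup_initializers(sys.argv[1]))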
| 13 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_rescale""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_pad""" ) )
def lowercase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
__snake_case : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__snake_case : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__snake_case : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
__snake_case : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__snake_case : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : List[str] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__snake_case : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__snake_case : Union[str, Any] = json.loads(f.read() )
__snake_case : int = {"image_id": 3_97_69, "annotations": target}
# encode them
__snake_case : Optional[Any] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__snake_case : Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
__snake_case : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase__ )
__snake_case : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
__snake_case : Optional[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase__ ) )
# verify boxes
__snake_case : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase__ )
__snake_case : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
__snake_case : Optional[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase__ ) )
# verify is_crowd
__snake_case : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase__ ) )
# verify class_labels
__snake_case : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase__ ) )
# verify orig_size
__snake_case : List[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase__ ) )
# verify size
__snake_case : Dict = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase__ ) )
@slow
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__snake_case : Optional[int] = json.loads(f.read() )
__snake_case : List[str] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
__snake_case : Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__snake_case : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__snake_case : Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
__snake_case : int = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase__ )
__snake_case : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
__snake_case : str = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase__ ) )
# verify boxes
__snake_case : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase__ )
__snake_case : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
__snake_case : Optional[int] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase__ ) )
# verify is_crowd
__snake_case : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase__ ) )
# verify class_labels
__snake_case : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase__ ) )
# verify masks
__snake_case : Any = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCAmelCase__ )
# verify orig_size
__snake_case : Union[str, Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase__ ) )
# verify size
__snake_case : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase__ ) )
| 362 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase )
__snake_case : Optional[int] = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__UpperCamelCase = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
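# Follow-up sketch (illustrative, not in the original script): the converted
# folder holds only the weights file, so load it with a DialoGPT/GPT-2 config
# fetched separately, e.g.:
#
#   from transformers import AutoConfig, GPT2LMHeadModel
#
#   config = AutoConfig.from_pretrained("microsoft/DialoGPT-small")
#   model = GPT2LMHeadModel.from_pretrained("./DialoGPT-small", config=config)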
| 13 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
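# Added note: `ratio_char_token` (characters per token) is a rough measure of how
# efficiently the tokenizer compresses each file; a higher ratio means fewer tokens
# per character of source code.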
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 0 |
'''simple docstring'''
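# NOTE (added, illustrative): the checkers below assume a singly linked list node
# type defined elsewhere; this minimal sketch makes the file self-contained. Only
# the `val` and `next` attributes are required.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next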
def is_palindrome(head):
    """Check a linked list for palindromicity by reversing its second half in place."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
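# Trade-offs (added note): the reversal approach above runs in O(n) time and O(1)
# extra space but mutates the input list; the stack variant below keeps the list
# intact at the cost of O(n) extra space; the dict variant compares symmetric
# positions per distinct value.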
def is_palindrome_stack(head):
    """Check a linked list for palindromicity using an explicit stack for the second half."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    """Check palindromicity by recording the positions of each value and testing symmetry."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
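# Illustrative example (added note): a fairseq key such as "encoder.layers.3.fc1.weight"
# matches the "fc1" entry above, and the "*" in the mapped key is later replaced by the
# layer index ("3"), yielding "encoder.layers.3.feed_forward.intermediate_dense".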
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF model attribute addressed by `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and copy every recognised tensor into `hf_model`."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor, dispatching on its layer/type ids."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a SEWConfig from the fairseq model's configuration."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak a fairseq SEW checkpoint into a transformers checkpoint."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 13 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
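# Illustrative behaviour of `_re_checkpoint` (added example, not in the original script):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]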
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches its name, if any."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise if any non-deprecated config class lacks a valid checkpoint in its docstring."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 365 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
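# Derivation (added note): P(n) = n(3n - 1)/2, so 3n^2 - n - 2P = 0 and the positive
# root is n = (1 + sqrt(1 + 24P)) / 6; P is pentagonal exactly when this n is a
# positive integer, which the `% 1 == 0` test above checks.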
def solution(limit: int = 5000) -> int:
    """Find a pentagonal pair whose sum and difference are pentagonal; return the difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 13 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Union[str, Any] = MBartaaTokenizer(UpperCamelCase__ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = '''<s>'''
__snake_case : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCamelCase__ ) , 10_54 )
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = MBartaaTokenizer(UpperCamelCase__ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=UpperCamelCase__ )
__snake_case : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__snake_case : Union[str, Any] = tempfile.mkdtemp()
__snake_case : str = tokenizer_r.save_pretrained(UpperCamelCase__ )
__snake_case : Tuple = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
__snake_case : List[str] = tokenizer_r.from_pretrained(UpperCamelCase__ )
__snake_case : Tuple = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=True
__snake_case : Union[str, Any] = tempfile.mkdtemp()
__snake_case : Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
__snake_case : str = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
__snake_case : Optional[int] = tokenizer_r.from_pretrained(UpperCamelCase__ )
__snake_case : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=False
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Union[str, Any] = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
__snake_case : Any = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase__ )
__snake_case : Tuple = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 25_00_38 )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
__snake_case : Optional[Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__snake_case : str = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
__snake_case : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , UpperCamelCase__ )
__snake_case : List[str] = 10
__snake_case : str = self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0]
self.assertEqual(ids[0] , UpperCamelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_53, 25_00_01] )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase__ )
__snake_case : Optional[Any] = MBartaaTokenizer.from_pretrained(UpperCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__ )
@require_torch
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__snake_case : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="""pt""" )
__snake_case : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors="""pt""" )
__snake_case : int = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10 , return_tensors="""pt""" )
__snake_case : List[Any] = targets['''input_ids''']
__snake_case : List[Any] = shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[25_00_04, 62, 30_34, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 366 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class _A ( unittest.TestCase , GenerationIntegrationTestsMixin ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                """simple docstring"""
                super().__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                """simple docstring"""
                super().__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    """simple docstring"""
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
                def call(self, inputs, *args, **kwargs):
                    """simple docstring"""
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                """simple docstring"""
                return super().call(input_ids, **kwargs)
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                """simple docstring"""
                return super().call(input_ids, **kwargs)
        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 13 | 0 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one of the two binary inputs is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 367 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int) -> None:
    """Recursively place one queen per row, pruning columns and diagonals already attacked."""
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
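        # For example, a queen at (row=1, col=3) shares the 45º diagonal
        # (row - col = -2) with (0, 2) and (2, 4), and the 135º diagonal
        # (row + col = 4) with (0, 4) and (2, 2).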
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n,
        )
def n_queens_solution(n: int) -> None:
    """Print every distinct placement of n queens on an n x n board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 13 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the number of trainable parameters of `model`."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the top checkpoints according to the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Stop training when the validation metric stops improving."""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class _A ( pl.Callback ):
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase__ )
@rank_zero_only
def lowercase__ ( self : Dict , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule , __magic_name__ : str , __magic_name__ : Tuple=True ) -> None:
"""simple docstring"""
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
__snake_case : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
__snake_case : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__snake_case : Union[str, Any] = od / """test_results.txt"""
__snake_case : Union[str, Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__snake_case : Optional[int] = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
__snake_case : Any = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCamelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCamelCase__ )
with open(UpperCamelCase__ , """a+""" ) as writer:
for key in sorted(UpperCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__snake_case : Optional[int] = metrics[key]
if isinstance(UpperCamelCase__ , torch.Tensor ):
__snake_case : List[Any] = val.item()
__snake_case : Optional[int] = f'''{key}: {val:.6f}\n'''
writer.write(UpperCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
__snake_case : Optional[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(UpperCamelCase__ )
@rank_zero_only
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
try:
__snake_case : int = pl_module.model.model.num_parameters()
except AttributeError:
__snake_case : Optional[Any] = pl_module.model.num_parameters()
__snake_case : Optional[Any] = count_trainable_parameters(UpperCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def lowercase__ ( self : Dict , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> Union[str, Any]:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase__ , UpperCamelCase__ , """test""" )
@rank_zero_only
def lowercase__ ( self : Optional[Any] , __magic_name__ : pl.Trainer , __magic_name__ : Any ) -> Any:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 368 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class _A ( RagRetriever ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
super().__init__(
__magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main(self):
        """simple docstring"""
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        """simple docstring"""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
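    # Added note: the heuristic above relies on Linux-style interface names
    # (eth0, ens3, enp0s1, ...) all starting with "e"; on other naming schemes
    # GLOO_SOCKET_IFNAME would have to be set explicitly.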
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 13 | 0 |
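The retriever above gathers question states on the main worker and scatters the retrieved results back over a gloo group. A minimal, self-contained sketch of that gather-then-scatter pattern follows; it assumes torch.distributed has already been initialized with a gloo-backed group, and gather_process_scatter is an illustrative name, not part of the original code.

import torch
import torch.distributed as dist

def gather_process_scatter(local_tensor: torch.Tensor, group) -> torch.Tensor:
    # every rank sends its tensor to rank 0 ...
    rank = dist.get_rank(group=group)
    world_size = dist.get_world_size(group=group)
    gather_list = [torch.empty_like(local_tensor) for _ in range(world_size)] if rank == 0 else None
    dist.gather(local_tensor, dst=0, gather_list=gather_list, group=group)
    # ... rank 0 does the heavy lifting (retrieval in the class above; doubled
    # here as a stand-in) and prepares one result chunk per rank ...
    scatter_list = [t * 2 for t in gather_list] if rank == 0 else None
    # ... and each rank receives its own chunk back.
    result = torch.empty_like(local_tensor)
    dist.scatter(result, src=0, scatter_list=scatter_list, group=group)
    return result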
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = 'Hello world! cécé herlolip'
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = FairseqRobertaModel.from_pretrained(snake_case_ )
roberta.eval() # disable dropout
__snake_case : Tuple = roberta.model.encoder.sentence_encoder
__snake_case : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__snake_case : List[str] = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , snake_case_ )
__snake_case : str = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case : Dict = roberta_sent_encoder.embed_tokens.weight
__snake_case : Any = roberta_sent_encoder.embed_positions.weight
__snake_case : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__snake_case : Dict = roberta_sent_encoder.layer_norm.weight
__snake_case : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__snake_case : int = model.roberta.encoder.layer[i]
__snake_case : List[str] = roberta_sent_encoder.layers[i]
__snake_case : int = layer.attention
__snake_case : int = roberta_layer.self_attn_layer_norm.weight
__snake_case : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
__snake_case : List[str] = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__snake_case : str = roberta_layer.self_attn.q_proj.weight
__snake_case : int = roberta_layer.self_attn.q_proj.bias
__snake_case : Any = roberta_layer.self_attn.k_proj.weight
__snake_case : Optional[Any] = roberta_layer.self_attn.k_proj.bias
__snake_case : Dict = roberta_layer.self_attn.v_proj.weight
__snake_case : str = roberta_layer.self_attn.v_proj.bias
# self-attention output
__snake_case : List[Any] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__snake_case : List[Any] = roberta_layer.self_attn.out_proj.weight
__snake_case : Any = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__snake_case : Optional[Any] = roberta_layer.final_layer_norm.weight
__snake_case : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
__snake_case : List[Any] = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case : Optional[Any] = roberta_layer.fca.weight
__snake_case : Optional[int] = roberta_layer.fca.bias
# output
__snake_case : Any = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case : Tuple = roberta_layer.fca.weight
__snake_case : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
__snake_case : Union[str, Any] = roberta.model.classification_heads["""mnli"""].dense.weight
__snake_case : int = roberta.model.classification_heads["""mnli"""].dense.bias
__snake_case : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
__snake_case : List[str] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__snake_case : str = roberta.model.encoder.lm_head.dense.weight
__snake_case : Optional[int] = roberta.model.encoder.lm_head.dense.bias
__snake_case : Optional[int] = roberta.model.encoder.lm_head.layer_norm.weight
__snake_case : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
__snake_case : List[str] = roberta.model.encoder.lm_head.weight
__snake_case : str = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__snake_case : List[str] = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
__snake_case : int = model(snake_case_ )[0]
if classification_head:
__snake_case : Tuple = roberta.model.classification_heads["""mnli"""](roberta.extract_features(snake_case_ ) )
else:
__snake_case : str = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
__snake_case : Optional[int] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
__snake_case : Dict = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__UpperCamelCase = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 369 |
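The conversion script above ends by feeding the same sentence through both models and comparing the outputs. That parity check is a reusable pattern when porting any checkpoint; a small sketch follows, with the 1e-3 tolerance carried over from the script and the function name chosen for illustration.

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # report the worst element-wise disagreement before the pass/fail verdict
    max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_abs_diff}")
    return torch.allclose(ours, theirs, atol=atol)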
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class _A :
lowercase__: str
lowercase__: Optional[str] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.major, self.minor, self.patch
def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return Version(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
return other
raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
try:
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
return self.tuple < other.tuple
def __hash__( self : Any ) -> Any:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return self.version_str
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return ".".join(str(_lowerCamelCase ) for v in version_tuple )
| 13 | 0 |
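For reference, here is how the version helper above is used once the mangled names are read back as their originals (both Version and _str_to_version_tuple are assumed names, inferred from the regex and the @total_ordering decorator):

# parsing: "x.y.z" -> (x, y, z), enforced by the _VERSION_REG pattern
assert _str_to_version_tuple("1.2.3") == (1, 2, 3)
# comparison: @total_ordering derives <=, >, >= from __lt__ and __eq__
v_old, v_new = Version("1.0.0"), Version("1.10.2")
assert v_old < v_new
# strings are coerced to Version via _validate_operand before comparing
assert v_new == "1.10.2"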
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if isinstance(a__ , a__ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if isinstance(a__ , a__ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
__snake_case : Any = False
if num < 0:
__snake_case : Optional[Any] = True
__snake_case : Union[str, Any] = -num
__snake_case : str = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(a__ ) for e in binary )
return "0b" + "".join(str(a__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
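Quick illustrative calls for the integer-to-binary converter above, using decimal_to_binary as an assumed readable name for the mangled _a; the module imports doctest but the function itself carries no doctests, so these serve the same purpose:

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == "0b1010"  # bits collected least-significant first, then prefixed
assert decimal_to_binary(-5) == "-0b101"  # the sign is tracked separately and restored at the end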
'''simple docstring'''
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
__snake_case : Tuple = """"""
while len(_lowerCamelCase ) % 3 != 0:
__snake_case : Any = """0""" + bin_string
__snake_case : Tuple = [
bin_string[index : index + 3]
for index in range(len(_lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__snake_case : Tuple = 0
for index, val in enumerate(_lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) )
oct_string += str(_lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 13 | 0 |
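The converter above left-pads the input to a multiple of three bits and maps each 3-bit group to one octal digit. A couple of illustrative calls, with bin_to_octal standing in for the mangled _a:

assert bin_to_octal("1010") == "12"      # padded to "001010" -> groups 001, 010 -> 1, 2
assert bin_to_octal("111111") == "77"    # already a multiple of three bits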
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCamelCase = logging.get_logger(__name__)
# General docstring
__UpperCamelCase = "RegNetConfig"
# Base docstring
__UpperCamelCase = "facebook/regnet-y-040"
__UpperCamelCase = [1, 1088, 7, 7]
# Image classification docstring
__UpperCamelCase = "facebook/regnet-y-040"
__UpperCamelCase = "tabby, tabby cat"
__UpperCamelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _A ( nn.Module ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , ) -> Tuple:
"""simple docstring"""
super().__init__()
__snake_case : Optional[Any] = nn.Convad(
_A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , groups=_A , bias=_A , )
__snake_case : str = nn.BatchNormad(_A )
__snake_case : Any = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase__ ( self : List[str] , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = self.convolution(_A )
__snake_case : Dict = self.normalization(_A )
__snake_case : Any = self.activation(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig ) -> str:
"""simple docstring"""
super().__init__()
__snake_case : Optional[Any] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__snake_case : Optional[int] = config.num_channels
def lowercase__ ( self : List[str] , __magic_name__ : str ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__snake_case : int = self.embedder(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : Optional[Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 ) -> Tuple:
"""simple docstring"""
super().__init__()
__snake_case : str = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A )
__snake_case : Tuple = nn.BatchNormad(_A )
def lowercase__ ( self : str , __magic_name__ : Tensor ) -> Tensor:
"""simple docstring"""
__snake_case : Optional[Any] = self.convolution(_A )
__snake_case : List[str] = self.normalization(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> str:
"""simple docstring"""
super().__init__()
__snake_case : Dict = nn.AdaptiveAvgPoolad((1, 1) )
__snake_case : List[str] = nn.Sequential(
nn.Convad(_A , _A , kernel_size=1 ) , nn.ReLU() , nn.Convad(_A , _A , kernel_size=1 ) , nn.Sigmoid() , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = self.pooler(_A )
__snake_case : Dict = self.attention(_A )
__snake_case : Optional[Any] = hidden_state * attention
return hidden_state
class _A ( nn.Module ):
def __init__( self : Any , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 ) -> Tuple:
"""simple docstring"""
super().__init__()
__snake_case : List[str] = in_channels != out_channels or stride != 1
__snake_case : Tuple = max(1 , out_channels // config.groups_width )
__snake_case : Union[str, Any] = (
RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
)
__snake_case : Any = nn.Sequential(
RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , )
__snake_case : int = ACTaFN[config.hidden_act]
def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = hidden_state
__snake_case : str = self.layer(_A )
__snake_case : str = self.shortcut(_A )
hidden_state += residual
__snake_case : Tuple = self.activation(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : Dict , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__snake_case : Optional[int] = in_channels != out_channels or stride != 1
__snake_case : int = max(1 , out_channels // config.groups_width )
__snake_case : Dict = (
RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
)
__snake_case : List[str] = nn.Sequential(
RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , )
__snake_case : Dict = ACTaFN[config.hidden_act]
def lowercase__ ( self : Union[str, Any] , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
__snake_case : str = hidden_state
__snake_case : Any = self.layer(_A )
__snake_case : Any = self.shortcut(_A )
hidden_state += residual
__snake_case : Optional[Any] = self.activation(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : str , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__snake_case : Any = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_A , _A , _A , stride=_A , ) , *[layer(_A , _A , _A ) for _ in range(depth - 1 )] , )
def lowercase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
__snake_case : Dict = self.layers(_A )
return hidden_state
class _A ( nn.Module ):
def __init__( self : Dict , __magic_name__ : RegNetConfig ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : List[str] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_A , config.depths[1:] ):
self.stages.append(RegNetStage(_A , _A , _A , depth=_A ) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
__snake_case : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case : Optional[int] = hidden_states + (hidden_state,)
__snake_case : Tuple = stage_module(_A )
if output_hidden_states:
__snake_case : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class _A ( snake_case_ ):
lowercase__: int = RegNetConfig
lowercase__: Optional[int] = "regnet"
lowercase__: List[str] = "pixel_values"
lowercase__: Dict = True
def lowercase__ ( self : List[Any] , __magic_name__ : Dict ) -> Dict:
"""simple docstring"""
if isinstance(_A , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int=False ) -> Dict:
"""simple docstring"""
if isinstance(_A , _A ):
__snake_case : List[str] = value
__UpperCamelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCamelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , snake_case_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _A ( snake_case_ ):
def __init__( self : Optional[Any] , __magic_name__ : str ) -> int:
"""simple docstring"""
super().__init__(_A )
__snake_case : Any = config
__snake_case : List[str] = RegNetEmbeddings(_A )
__snake_case : str = RegNetEncoder(_A )
__snake_case : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__snake_case : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Any = self.embedder(_A )
__snake_case : int = self.encoder(
_A , output_hidden_states=_A , return_dict=_A )
__snake_case : Optional[Any] = encoder_outputs[0]
__snake_case : Dict = self.pooler(_A )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' , snake_case_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _A ( snake_case_ ):
def __init__( self : Optional[int] , __magic_name__ : int ) -> Dict:
"""simple docstring"""
super().__init__(_A )
__snake_case : List[Any] = config.num_labels
__snake_case : Optional[Any] = RegNetModel(_A )
# classification head
__snake_case : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[torch.LongTensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
__snake_case : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Union[str, Any] = self.regnet(_A , output_hidden_states=_A , return_dict=_A )
__snake_case : str = outputs.pooler_output if return_dict else outputs[1]
__snake_case : str = self.classifier(_A )
__snake_case : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case : Union[str, Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case : int = 'single_label_classification'
else:
__snake_case : Dict = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case : List[str] = MSELoss()
if self.num_labels == 1:
__snake_case : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case : Union[str, Any] = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case : Optional[int] = BCEWithLogitsLoss()
__snake_case : List[Any] = loss_fct(_A , _A )
if not return_dict:
__snake_case : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 371 |
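The modeling file above defines the classification variant exercised in its docstrings. A minimal inference sketch using the public transformers API and the facebook/regnet-y-040 checkpoint named there (downloading the checkpoint requires network access; "cat.jpg" is a placeholder path):

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.jpg")  # any RGB image works here
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])  # e.g. "tabby, tabby cat"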
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__UpperCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__UpperCamelCase = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 0 |
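The __init__ above registers its submodules with _LazyModule so that nothing heavy is imported until first use. A stripped-down sketch of that pattern (a simplified stand-in, not transformers' actual _LazyModule implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        # the real import happens only when the attribute is first touched
        submodule = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
        return getattr(submodule, item)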
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _a ( ) -> str:
"""simple docstring"""
__snake_case : str = 10
__snake_case : List[str] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
__snake_case : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(A__ ) ),
} , features=A__ , )
return dataset
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=A__ )
return filename
# FILE_CONTENT + files
__UpperCamelCase = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
__snake_case : Union[str, Any] = FILE_CONTENT
with open(A__ , """w""" ) as f:
f.write(A__ )
return filename
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
import bza
__snake_case : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
__snake_case : Tuple = bytes(A__ , """utf-8""" )
with bza.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
import gzip
__snake_case : Dict = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
__snake_case : int = bytes(A__ , """utf-8""" )
with gzip.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
__snake_case : Optional[int] = bytes(A__ , """utf-8""" )
with lza.frame.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(A__ , """w""" ) as archive:
archive.write(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
import tarfile
__snake_case : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(A__ , """w""" ) as f:
f.add(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
__snake_case : Dict = bytes(A__ , """utf-8""" )
with lzma.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
import zipfile
__snake_case : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
__snake_case : Tuple = bytes(A__ , """utf-8""" )
with zstd.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
__snake_case : Tuple = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(A__ , """w""" ) as f:
f.write(A__ )
return filename
__UpperCamelCase = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
__UpperCamelCase = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
__UpperCamelCase = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
__UpperCamelCase = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
__UpperCamelCase = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="""session""" )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Any = datasets.Dataset.from_dict(A__ )
__snake_case : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(A__ ) ) as con:
__snake_case : Any = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(A__ , """w""" , newline="""""" ) as f:
__snake_case : Union[str, Any] = csv.DictWriter(A__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(A__ , """w""" , newline="""""" ) as f:
__snake_case : Optional[int] = csv.DictWriter(A__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
import bza
__snake_case : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(A__ , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(A__ , """wb""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(A__ ) )
f.write(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(A__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
__snake_case : str = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(A__ , """wb""" ) as f:
__snake_case : Optional[int] = pq.ParquetWriter(A__ , schema=A__ )
__snake_case : str = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(A__ ) )] for k in DATA[0]} , schema=A__ )
writer.write_table(A__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__snake_case : List[str] = {"""data""": DATA}
with open(A__ , """w""" ) as f:
json.dump(A__ , A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__snake_case : Tuple = {"""data""": DATA_DICT_OF_LISTS}
with open(A__ , """w""" ) as f:
json.dump(A__ , A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(A__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(A__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(A__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(A__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(A__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(A__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(A__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(A__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
import gzip
__snake_case : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(A__ , """rb""" ) as orig_file:
with gzip.open(A__ , """wb""" ) as zipped_file:
zipped_file.writelines(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
import gzip
__snake_case : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(A__ , """rb""" ) as orig_file:
with gzip.open(A__ , """wb""" ) as zipped_file:
zipped_file.writelines(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(A__ ) )
f.write(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.join("""nested""" , os.path.basename(A__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(A__ , """w""" ) as f:
f.add(A__ , arcname=os.path.basename(A__ ) )
f.add(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(A__ , """w""" ) as f:
f.add(A__ , arcname=os.path.join("""nested""" , os.path.basename(A__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = ["""0""", """1""", """2""", """3"""]
__snake_case : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(A__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(A__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = ["""0""", """1""", """2""", """3"""]
__snake_case : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(A__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(A__ ) )
f.write(A__ , arcname=os.path.basename(A__ ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
f.write(A__ , arcname=os.path.join("""main_dir""" , os.path.basename(A__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(A__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
__snake_case : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.write(A__ )
return path
@pytest.fixture(scope="""session""" )
def _a ( ) -> int:
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _a ( ) -> Dict:
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(A__ , """w""" ) as f:
f.write(A__ , arcname=os.path.basename(A__ ) )
f.write(A__ , arcname=os.path.basename(A__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : List[Any] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 350 |
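The block above is a conftest of session-scoped pytest fixtures (the repeated mangled _a hides their distinct names). A test consuming one of them would look like the sketch below, where csv_path is the assumed fixture name behind the dataset.csv factory:

import csv

def test_csv_fixture_round_trip(csv_path):
    # pytest injects the path produced by the session-scoped fixture above
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]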
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
__snake_case : Tuple = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
__snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""]
__snake_case : Any = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
__snake_case : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = x
__snake_case : Dict = y
for step in range(lowerCAmelCase__ ): # noqa: B007
__snake_case : Optional[int] = a * a - b * b + x
__snake_case : Union[str, Any] = 2 * a * b + y
__snake_case : Dict = a_new
# divergence happens for all complex numbers whose absolute value
# exceeds 2, i.e. once a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowerCAmelCase__ , 1 , 1 ) )
def _a ( _lowerCamelCase = 800 , _lowerCamelCase = 600 , _lowerCamelCase = -0.6 , _lowerCamelCase = 0 , _lowerCamelCase = 3.2 , _lowerCamelCase = 50 , _lowerCamelCase = True , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = Image.new("""RGB""" , (image_width, image_height) )
__snake_case : Any = img.load()
# loop through the image-coordinates
for image_x in range(lowerCAmelCase__ ):
for image_y in range(lowerCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
__snake_case : List[Any] = figure_width / image_width * image_height
__snake_case : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
__snake_case : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__snake_case : Dict = get_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__snake_case : Union[str, Any] = get_color_coded_rgb(lowerCAmelCase__ )
else:
__snake_case : List[Any] = get_black_and_white_rgb(lowerCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__UpperCamelCase = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 351 |
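The escape-time function above returns a value in [0, 1]: 1 for points that never escape within max_step iterations, scaled down toward 0 the sooner |z| exceeds 2. Two illustrative checks, with get_distance standing in for the mangled _a:

assert get_distance(0.0, 0.0, 50) == 1.0   # the origin stays bounded forever
assert get_distance(3.0, 3.0, 50) == 0.0   # escapes on the very first iteration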
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _A :
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = image_size
__snake_case : Tuple = patch_size
__snake_case : str = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = coordinate_size
__snake_case : List[Any] = shape_size
__snake_case : Tuple = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Optional[Any] = scope
__snake_case : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : List[str] = text_seq_length
__snake_case : str = (image_size // patch_size) ** 2 + 1
__snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__snake_case : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : Union[str, Any] = bbox[i, j, 3]
__snake_case : Union[str, Any] = bbox[i, j, 1]
__snake_case : Any = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : Optional[Any] = bbox[i, j, 2]
__snake_case : Tuple = bbox[i, j, 0]
__snake_case : Optional[Any] = tmp_coordinate
__snake_case : Dict = tf.constant(__magic_name__ )
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__snake_case : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
__snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
__snake_case : List[str] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
__snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.num_labels
__snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
__snake_case : Tuple = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = 2
__snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs
__snake_case : List[Any] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__: Union[str, Any] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase__: Dict = False
lowercase__: int = False
lowercase__: Dict = False
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return True
def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict:
"""simple docstring"""
__snake_case : Any = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
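            # tile each tensor input along a new second axis so every multiple-choice candidate gets its
            # own copy (in the upstream test this branch guards the multiple-choice model mapping)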
__snake_case : Union[str, Any] = {
k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
                __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : int = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : str = TFLayoutLMvaModelTester(self )
__snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
__snake_case : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = prepared_for_class.pop("""input_ids""" )
__snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__snake_case : str = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__snake_case : Dict = -1_00
__snake_case : str = tf.convert_to_tensor(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
__snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys()
__snake_case : Optional[Any] = inspect.signature(model.call ).parameters
__snake_case : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__snake_case : Union[str, Any] = {0: """input_ids"""}
for label_key in label_keys:
__snake_case : int = signature_names.index(__magic_name__ )
__snake_case : Optional[int] = label_key
__snake_case : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__snake_case : List[str] = prepared_for_class[value]
__snake_case : str = tuple(__magic_name__ )
# Send to model
__snake_case : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Tuple = type
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img() -> Optional[Any]:
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values
__snake_case : Tuple = tf.constant([[1, 2]] )
__snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
# verify the logits
__snake_case : List[str] = (1, 1_99, 7_68)
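        # 199 positions = 2 text tokens + (224 // 16) ** 2 + 1 = 197 patch tokens for layoutlmv3-base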
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
__snake_case : Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _A ( __lowercase ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__magic_name__ , """num_attention_heads""" ) )
class _A :
def __init__( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : str=13 , __magic_name__ : int=64 , __magic_name__ : str=3 , __magic_name__ : Tuple=3 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Any=[1_28, 2_56, 3_84] , __magic_name__ : List[Any]=[4, 6, 8] , __magic_name__ : Union[str, Any]=[2, 3, 4] , __magic_name__ : Union[str, Any]=[16, 16, 16] , __magic_name__ : Optional[Any]=0 , __magic_name__ : List[str]=[2, 2, 2] , __magic_name__ : int=[2, 2, 2] , __magic_name__ : List[str]=0.02 , __magic_name__ : Tuple=True , __magic_name__ : int=True , __magic_name__ : Dict=2 , ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = image_size
__snake_case : Dict = num_channels
__snake_case : int = kernel_size
__snake_case : List[Any] = stride
__snake_case : Union[str, Any] = padding
__snake_case : List[str] = hidden_sizes
__snake_case : int = num_attention_heads
__snake_case : List[Any] = depths
__snake_case : Tuple = key_dim
__snake_case : List[str] = drop_path_rate
__snake_case : Tuple = patch_size
__snake_case : List[str] = attention_ratio
__snake_case : Tuple = mlp_ratio
__snake_case : int = initializer_range
__snake_case : Optional[int] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__snake_case : List[Any] = is_training
__snake_case : List[Any] = use_labels
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = LevitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = model(__magic_name__ )
__snake_case : Optional[Any] = (self.image_size, self.image_size)
__snake_case , __snake_case : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
__snake_case : Dict = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__snake_case : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
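        # e.g. with image_size=64, kernel_size=3, stride=2, padding=1 the four convolutions reduce
        # each side 64 -> 32 -> 16 -> 8 -> 4, and the two Subsample stages leave ceil(4 / 4) ** 2 = 1 token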
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
__snake_case : int = self.num_labels
__snake_case : Optional[int] = LevitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Any = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : List[str] = config_and_inputs
__snake_case : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase__: List[Any] = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__: Optional[int] = False
lowercase__: str = False
lowercase__: List[Any] = False
lowercase__: Optional[int] = False
lowercase__: Tuple = False
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : str = LevitModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : List[Any] ):
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.hidden_states
__snake_case : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
__snake_case , __snake_case : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
__snake_case : List[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__snake_case : Optional[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
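            # (with the 64x64 test images the four stride-2 convolutions leave a 4x4 grid, so height * width = 16)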
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self : Any , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Any ) -> Dict:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.model_tester.is_training:
return
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__magic_name__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__snake_case : int = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = model(**__magic_name__ ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__snake_case : int = False
__snake_case : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(__magic_name__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__snake_case : Any = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
__snake_case : List[str] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Dict = model(**__magic_name__ ).loss
loss.backward()
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__magic_name__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
__snake_case : Tuple = problem_type["""title"""]
__snake_case : Optional[int] = problem_type["""num_labels"""]
__snake_case : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if problem_type["num_labels"] > 1:
__snake_case : Dict = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
__snake_case : str = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__magic_name__ ) as warning_list:
__snake_case : List[Any] = model(**__magic_name__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = LevitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img() -> List[Any]:
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : str = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__magic_name__ )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Optional[int] = model(**__magic_name__ )
# verify the logits
__snake_case : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Any = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 352 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _A :
def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = patch_size
__snake_case : List[str] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = attention_type
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = scope
__snake_case : Optional[int] = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 (the final +1 is the CLS token)
__snake_case : str = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
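        # e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, so the sequence length is 2 * 25 + 1 = 51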
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__snake_case : str = self.num_labels
return config
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Any = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
# verify the logits shape
__snake_case : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__: List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: List[str] = False
lowercase__: List[Any] = False
lowercase__: Dict = False
lowercase__: int = False
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = TimesformerModelTester(self )
__snake_case : List[Any] = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__magic_name__ )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length
__snake_case : Tuple = self.model_tester.num_frames
__snake_case : str = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Dict = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Optional[int] = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__snake_case : int = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Optional[int] = True
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ):
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.hidden_states
__snake_case : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def prepare_video() -> List[Any]:
    """simple docstring"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
__magic_name__ )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Dict = prepare_video()
__snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__UpperCamelCase = logging.getLogger(__name__)
@dataclass
class _A :
lowercase__: str
lowercase__: List[str]
lowercase__: Optional[List[str]]
@dataclass
class _A :
lowercase__: List[int]
lowercase__: List[int]
lowercase__: Optional[List[int]] = None
lowercase__: Optional[List[int]] = None
class _A ( lowercase_ ):
lowercase__: str = "train"
lowercase__: Optional[int] = "dev"
lowercase__: Union[str, Any] = "test"
class _A :
@staticmethod
def lowercase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def lowercase__ ( __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def lowercase__ ( __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[int]="[CLS]" , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str="[SEP]" , __magic_name__ : Union[str, Any]=False , __magic_name__ : int=False , __magic_name__ : Union[str, Any]=0 , __magic_name__ : Optional[Any]=0 , __magic_name__ : Dict=-1_00 , __magic_name__ : Any=0 , __magic_name__ : Any=True , ) -> List[InputFeatures]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(a__ )}
__snake_case : Union[str, Any] = []
for ex_index, example in enumerate(a__ ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" , a__ , len(a__ ) )
__snake_case : Dict = []
__snake_case : Union[str, Any] = []
for word, label in zip(example.words , example.labels ):
__snake_case : List[str] = tokenizer.tokenize(a__ )
            # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(a__ ) > 0:
tokens.extend(a__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
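                # e.g. "Washington" -> ["Wash", "##ing", "##ton"] with label "B-LOC" contributes
                # [label_map["B-LOC"], pad_token_label_id, pad_token_label_id]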
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(a__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__snake_case : Optional[int] = tokenizer.num_special_tokens_to_add()
if len(a__ ) > max_seq_length - special_tokens_count:
__snake_case : Tuple = tokens[: (max_seq_length - special_tokens_count)]
__snake_case : Dict = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__snake_case : Dict = [sequence_a_segment_id] * len(a__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__snake_case : Any = [cls_token] + tokens
__snake_case : Union[str, Any] = [pad_token_label_id] + label_ids
__snake_case : List[str] = [cls_token_segment_id] + segment_ids
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(a__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__snake_case : Any = [1 if mask_padding_with_zero else 0] * len(a__ )
# Zero-pad up to the sequence length.
__snake_case : Tuple = max_seq_length - len(a__ )
if pad_on_left:
__snake_case : Optional[Any] = ([pad_token] * padding_length) + input_ids
__snake_case : List[Any] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__snake_case : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
__snake_case : Tuple = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(a__ ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(a__ ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(a__ ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(a__ ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(a__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__snake_case : int = None
features.append(
InputFeatures(
input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , label_ids=a__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _A ( lowercase_ ):
lowercase__: List[InputFeatures]
lowercase__: int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : List[Any] = None , __magic_name__ : Any=False , __magic_name__ : Tuple = Split.train , ) -> str:
"""simple docstring"""
__snake_case : Dict = os.path.join(
a__ , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(a__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : Optional[Any] = cached_features_file + """.lock"""
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__snake_case : str = torch.load(a__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__snake_case : Dict = token_classification_task.read_examples_from_file(a__ , a__ )
# TODO clean up all this to leverage built-in features of tokenizers
__snake_case : Any = token_classification_task.convert_examples_to_features(
a__ , a__ , a__ , a__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , a__ )
def __len__( self : Dict ) -> List[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , __magic_name__ : List[str] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _A :
lowercase__: List[InputFeatures]
lowercase__: int = -100
def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Dict = None , __magic_name__ : Dict=False , __magic_name__ : Optional[Any] = Split.train , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = token_classification_task.read_examples_from_file(a__ , a__ )
# TODO clean up all this to leverage built-in features of tokenizers
__snake_case : str = token_classification_task.convert_examples_to_features(
a__ , a__ , a__ , a__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
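            # yield the converted features one example at a time so tf.data can build batches lazily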
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__snake_case : Optional[int] = tf.data.Dataset.from_generator(
a__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__snake_case : Optional[int] = tf.data.Dataset.from_generator(
a__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : str ) -> List[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Dict , __magic_name__ : Union[str, Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
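# Note: _LazyModule defers the submodule imports declared above until first attribute access, and names
# only land in _import_structure when their backend (vision / torch) is importable. A sketch of the
# intended usage, assuming both backends are installed:
# from transformers import ConditionalDetrConfig, ConditionalDetrModel
# model = ConditionalDetrModel(ConditionalDetrConfig())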
| 13 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i ) -> int: # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend() -> None:
    """simple docstring"""
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
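# map_nested applies add_one to every leaf value while preserving the container structure,
# which is what the expected_map_nested_* values in the next test encode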
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def test_map_nested_with_spark(num_proc ) -> None:
    """simple docstring"""
    s1 = [1, 2]
    s2 = {"""a""": 1, """b""": 2}
    s3 = {"""a""": [1, 2], """b""": [3, 4]}
    s4 = {"""a""": {"""1""": 1}, """b""": 2}
    s5 = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"""a""": 2, """b""": 3}
    expected_map_nested_s3 = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_s4 = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_s5 = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
| 354 |
'''simple docstring'''
def count_inversions_bf(arr ) -> int:
    """simple docstring"""
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr ) -> tuple:
    """simple docstring"""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p )
    sorted_q, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(sorted_p , sorted_q )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the halves."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
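# Worked check of the bulk-count step in _count_cross_inversions (inputs are
# illustrative): merging p = [5, 10] with q = [2, 6], the single comparison
# 5 > 2 accounts for the inversions (5, 2) and (10, 2) at once, which is
# exactly `num_inversion += len(p) - i`; (10, 6) is found one step later.
assert _count_cross_inversions([5, 10], [2, 6]) == ([2, 5, 6, 10], 3)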
| 13 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Trial division, skipping most candidates via the 6k +/- 1 pattern."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f"""{solution() = }""")
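# Quick sanity check under the definitions above (easy to verify by hand):
# the primes below 10 are 2, 3, 5 and 7.
assert solution(10) == 2 + 3 + 5 + 7 == 17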
| 355 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
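# Hedged usage sketch (the printed fields are illustrative): the defaults
# above mirror the classical-SR x2, 64px setting from the pretrained map, and
# num_layers is derived from depths rather than passed in.
if __name__ == "__main__":
    config = Swin2SRConfig()
    print(config.model_type, config.num_layers, config.upscale)  # swin2sr 6 2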
| 356 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.canine_tokenizer
__snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Any = self.canine_tokenizer
        __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Transformers."""]
__snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.canine_tokenizer
__snake_case : Optional[Any] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__snake_case : Any = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : str = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
__snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
                additional_special_tokens = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
                new_additional_token = chr(0xE007)
                additional_special_tokens.append(new_additional_token)
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
                special_token = chr(0xE005)
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
__snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
__snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
__snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__snake_case : Tuple = tokenizer.tokenize(__magic_name__ )
__snake_case : Any = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__snake_case : Optional[Any] = 0xE006
__snake_case : List[str] = chr(__magic_name__ )
__snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Any = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Tuple = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE006
__snake_case : int = chr(__magic_name__ )
__snake_case : List[Any] = [new_token_a]
__snake_case : Union[str, Any] = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__snake_case : Any = 0xE007
__snake_case : Any = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
__snake_case : Union[str, Any] = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[str] = """hello world"""
if self.space_between_special_tokens:
__snake_case : Union[str, Any] = """[CLS] hello world [SEP]"""
else:
__snake_case : List[Any] = input
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__snake_case : Dict = """a"""
__snake_case : Tuple = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
__snake_case : Dict = 0xE006
__snake_case : str = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
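# Standalone illustration of the character-level scheme exercised above
# (downloads google/canine-s): ids are plain Unicode code points framed by the
# 0xE000/0xE001 CLS/SEP markers, matching the expected ids in the batch test.
if __name__ == "__main__":
    tok = CanineTokenizer.from_pretrained("google/canine-s")
    print(tok("hi")["input_ids"])  # [57344, 104, 105, 57345]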
| 13 | 0 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells, frequency, initial_speed, random_frequency=False, random_speed=False, max_speed=5
):
    """Build a one-lane highway: -1 marks an empty cell, other values are car speeds."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # Ensure speed is not negative
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now, car_index):
    """Count empty cells between a car and the next car, wrapping at the end."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now, probability, max_speed):
    """One Nagel-Schreckenberg step: accelerate, brake to the gap, random slowdown."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway, number_of_update, probability, max_speed):
    """Run the update for a number of steps, moving each car by its new speed."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
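# Illustrative run under the definitions above (all parameters arbitrary):
# five cars at speed 1 on a 10-cell ring, four update steps, 30% chance of a
# random slowdown per car per step, speed capped at 5.
if __name__ == "__main__":
    print(simulate(construct_highway(10, 2, 1), 4, 0.3, 5))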
| 357 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"

    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
@property
    def num_layers(self) -> int:
"""simple docstring"""
return self._config.n_layer
@property
    def num_attention_heads(self) -> int:
"""simple docstring"""
return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
"""simple docstring"""
return 13
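# Hedged usage sketch of the export config above (the checkpoint choice is
# illustrative; use_past=True needs torch for the dummy past-key tensors):
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer

    onnx_config = CodeGenOnnxConfig(AutoConfig.from_pretrained("Salesforce/codegen-350M-mono"), use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    print(sorted(dummy))  # ['attention_mask', 'input_ids', 'past_key_values']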
| 13 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet: runs every
    ControlNet in `controlnets` and sums their residuals.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
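# Hypothetical usage sketch (checkpoint names and output path illustrative):
# per save_pretrained above, the two nets land in ./my-multi-controlnet and
# ./my-multi-controlnet_1, which from_pretrained walks back in the same order.
if __name__ == "__main__":
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    multi = MultiControlNetModel([canny, pose])
    multi.save_pretrained("./my-multi-controlnet")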
| 359 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def block_out_channels_0(self):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
"""simple docstring"""
return 1_00
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 13 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn leftover ["--key", "value", ...] tokens into a {key: value} dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
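# Illustrative check of parse_unknown_args: leftover "--flag value" tokens are
# paired into keyword overrides for the subcommand (the flags are made up).
assert parse_unknown_args(["--num_proc", "2", "--cache_dir", "/tmp/ds"]) == {
    "num_proc": "2",
    "cache_dir": "/tmp/ds",
}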
| 360 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
    def mask_token(self) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value):
        """Make the mask token eat the space before it (lstrip=True)."""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
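# Illustrative check of the special-token layout implemented above (downloads
# facebook/bart-base; the decoded tokens shown are indicative): single
# sequences become `<s> A </s>`, pairs become `<s> A </s></s> B </s>`.
if __name__ == "__main__":
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    ids = tok.encode("hello", add_special_tokens=True)
    print(tok.convert_ids_to_tokens(ids))  # e.g. ['<s>', 'hello', '</s>']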
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print first-order entropy, second-order entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and two-character sequences in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
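# Standalone sketch of the first-order formula used above, H = -sum(p * log2 p),
# with its own imports so it reads in isolation (the input string is arbitrary).
def shannon_entropy(sample: str) -> float:
    from collections import Counter  # also imported at module level above
    from math import log2

    counts = Counter(sample)
    total = sum(counts.values())
    return -sum((c / total) * log2(c / total) for c in counts.values())


if __name__ == "__main__":
    print(f"{shannon_entropy('abab'):.1f}")  # 1.0 -- one bit per symbol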
| 361 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos ignoring their names (restored afterwards)."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Rewire one node (recursing into If/Loop subgraphs) to read from new_name."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop initializer i and rewire its consumers to the earlier duplicate ref_i."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : str = os.path.dirname(_lowerCamelCase )
__snake_case : Dict = os.path.basename(_lowerCamelCase )
__snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) )
__snake_case : Dict = list(model.graph.initializer )
__snake_case : Optional[int] = set()
__snake_case : Optional[Any] = {}
__snake_case : Tuple = []
__snake_case : List[Any] = 0
for i in range(len(_lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_lowerCamelCase )
dup_set.add(_lowerCamelCase )
__snake_case : List[Any] = inits[j].data_type
__snake_case : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , _lowerCamelCase )
total_reduced_size += mem_size
__snake_case : Any = inits[i].name
__snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCamelCase )
else:
__snake_case : Dict = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
__snake_case : int = sorted(_lowerCamelCase )
_remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case : str = """optimized_""" + model_file_name
__snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
onnx.save(_lowerCamelCase , _lowerCamelCase )
return new_model
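# A standalone view of the duplication that _remove_dup_initializers_from_model
# collapses (sketch; the file name is illustrative): group initializers by their
# raw bytes and list the names that share identical data. This only covers
# tensors stored in raw_data; other storage fields would need the same treatment.
#   import onnx
#   model = onnx.load("model.onnx")
#   groups = {}
#   for init in model.graph.initializer:
#       groups.setdefault(init.raw_data, []).append(init.name)
#   print({names[0]: names[1:] for names in groups.values() if len(names) > 1})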
| 13 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ) -> qiskit.result.counts.Counts:
    """Build, simulate and measure an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""" )
    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
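# Sanity check on the returned counts (sketch): the QFT of the |00...0> input
# state is a uniform superposition, so with 10_000 shots each of the 2**n basis
# states should appear with a frequency close to 10_000 / 2**n.
#   counts = quantum_fourier_transform(3)
#   expected = 10_000 / 2**3  # = 1250 per outcome
#   print({state: count - expected for state, count in counts.items()})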
| 362 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase )
__snake_case : Optional[int] = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
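# Quick verification sketch (paths are illustrative; assumes the popped tensor
# is re-inserted under the new key, as in the upstream conversion script):
#   d = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
#   assert "lm_head.decoder.weight" not in d and "lm_head.weight" in d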
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__UpperCamelCase = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 13 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__UpperCamelCase = True
except (ImportError, AttributeError):
__UpperCamelCase = object
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
pass
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger("transformers-cli/serving")
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(SCREAMING_SNAKE_CASE_ , args.host , args.port , args.workers )
class _A ( snake_case__ ):
lowercase__: dict
class _A ( snake_case__ ):
lowercase__: List[str]
lowercase__: Optional[List[int]]
class _A ( snake_case__ ):
lowercase__: str
class _A ( snake_case__ ):
lowercase__: Any
class _A ( snake_case__ ):
@staticmethod
def lowercase__ ( __magic_name__ : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=_A , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=_A , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=_A , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=_A , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=_A , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=_A , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=_A , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=_A , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=_A )
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = pipeline
__snake_case : Optional[int] = host
__snake_case : str = port
__snake_case : Union[str, Any] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f'''Serving model over {host}:{port}''' )
__snake_case : Any = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=_A , response_class=_A , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=_A , response_class=_A , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=_A , response_class=_A , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=_A , response_class=_A , methods=["""POST"""] , ),
] , timeout=6_00 , )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase__ ( self : Tuple , __magic_name__ : int = Body(_A , embed=_A ) , __magic_name__ : str = Body(_A , embed=_A ) ) -> str:
"""simple docstring"""
try:
__snake_case : List[str] = self._pipeline.tokenizer.tokenize(_A )
if return_ids:
__snake_case : Optional[int] = self._pipeline.tokenizer.convert_tokens_to_ids(_A )
return ServeTokenizeResult(tokens=_A , tokens_ids=_A )
else:
return ServeTokenizeResult(tokens=_A )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(_A )} )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Union[str, Any] = Body(_A , embed=_A ) , __magic_name__ : Optional[Any] = Body(_A , embed=_A ) , __magic_name__ : Optional[Any] = Body(_A , embed=_A ) , ) -> Any:
"""simple docstring"""
try:
__snake_case : Optional[int] = self._pipeline.tokenizer.decode(_A , _A , _A )
return ServeDeTokenizeResult(model="""""" , text=_A )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(_A )} )
async def lowercase__ ( self : str , __magic_name__ : Optional[Any]=Body(_A , embed=_A ) ) -> Union[str, Any]:
"""simple docstring"""
if len(_A ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__snake_case : List[Any] = self._pipeline(_A )
return ServeForwardResult(output=_A )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(_A )} )
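# Example usage (sketch): the server is normally started through the CLI, e.g.
#   transformers-cli serve --task sentiment-analysis --port 8888
# and the routes registered above can then be exercised with any HTTP client.
# The JSON field names below are illustrative, since the route parameter names
# are elided in this listing:
#   import requests
#   requests.get("http://localhost:8888/").json()  # model config info
#   requests.post("http://localhost:8888/tokenize",
#                 json={"text_input": "Hello world", "return_ids": True}).json()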
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
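# Worked example (classic test vector): with the functions above,
#   encrypt_message("LEMON", "ATTACKATDAWN")  # -> "LXFOPVEFRNHR"
#   decrypt_message("LEMON", "LXFOPVEFRNHR")  # -> "ATTACKATDAWN"
# Non-letter characters pass through unchanged and do not advance the key.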
| 13 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _A ( a__ ):
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """depth_multiplier""" ) )
class _A :
def __init__( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Dict=13 , __magic_name__ : Any=3 , __magic_name__ : Tuple=32 , __magic_name__ : List[Any]=0.25 , __magic_name__ : int=8 , __magic_name__ : Union[str, Any]=8 , __magic_name__ : Any=6 , __magic_name__ : Optional[Any]=32 , __magic_name__ : str=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]="relu6" , __magic_name__ : Any=12_80 , __magic_name__ : Dict=0.1 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : str=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Union[str, Any]=None , ) -> List[Any]:
"""simple docstring"""
__snake_case : str = parent
__snake_case : str = batch_size
__snake_case : List[str] = num_channels
__snake_case : Any = image_size
__snake_case : Union[str, Any] = depth_multiplier
__snake_case : Optional[int] = depth_divisible_by
__snake_case : Dict = min_depth
__snake_case : Dict = expand_ratio
__snake_case : Optional[Any] = tf_padding
__snake_case : str = output_stride
__snake_case : Union[str, Any] = first_layer_is_expansion
__snake_case : Optional[Any] = finegrained_output
__snake_case : Optional[int] = hidden_act
__snake_case : Tuple = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__snake_case : Dict = classifier_dropout_prob
__snake_case : Tuple = use_labels
__snake_case : Optional[int] = is_training
__snake_case : List[Any] = num_labels
__snake_case : Union[str, Any] = initializer_range
__snake_case : int = scope
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : str = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = MobileNetVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowercase__ ( self : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.num_labels
__snake_case : Any = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case : List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = self.num_labels
__snake_case : Optional[int] = MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case : str = config_and_inputs
__snake_case : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A ( a__ , a__ , unittest.TestCase ):
lowercase__: Dict = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__: int = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__: Dict = False
lowercase__: Union[str, Any] = False
lowercase__: Union[str, Any] = False
lowercase__: Dict = False
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = MobileNetVaModelTester(self )
__snake_case : str = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
__snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : str ):
__snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__snake_case : List[Any] = outputs.hidden_states
__snake_case : List[str] = 16
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _a ( ) -> Dict:
"""simple docstring"""
__snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(SCREAMING_SNAKE_CASE_ )
__snake_case : Any = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__snake_case : List[Any] = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__snake_case : Optional[Any] = torch.tensor([0.2445, -1.1993, 0.1905] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__snake_case : Tuple = model.to(SCREAMING_SNAKE_CASE_ )
__snake_case : Union[str, Any] = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__snake_case : Dict = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**SCREAMING_SNAKE_CASE_ )
__snake_case : Union[str, Any] = outputs.logits
# verify the logits
__snake_case : Tuple = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
__snake_case : Dict = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=SCREAMING_SNAKE_CASE_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
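# Where the (1, 21, 65, 65) logits shape above comes from (sketch): the
# checkpoint is a 21-class PASCAL VOC DeepLabV3 head, and a 513x513 input with
# an effective output stride of 8 yields (513 - 1) / 8 + 1 = 65 positions per
# spatial side.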
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for attribute in key.split(""".""" ):
__snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case : Union[str, Any] = value
elif weight_type == "weight_g":
__snake_case : str = value
elif weight_type == "weight_v":
__snake_case : Tuple = value
elif weight_type == "bias":
__snake_case : str = value
else:
__snake_case : List[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = []
__snake_case : List[Any] = fairseq_model.state_dict()
__snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Any = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__snake_case : Dict = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
__snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
__snake_case : Dict = """weight_g"""
elif "weight_v" in name:
__snake_case : List[str] = """weight_v"""
elif "weight" in name:
__snake_case : str = """weight"""
elif "bias" in name:
__snake_case : int = """bias"""
else:
__snake_case : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Dict = full_name.split("""conv_layers.""" )[-1]
__snake_case : Optional[int] = name.split(""".""" )
__snake_case : Dict = int(items[0] )
__snake_case : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = SEWConfig()
if is_finetuned:
__snake_case : List[Any] = model.wav_encoder.wav_model.cfg
else:
__snake_case : Optional[Any] = model.cfg
__snake_case : Tuple = fs_config.conv_bias
__snake_case : List[Any] = eval(fs_config.conv_feature_layers )
__snake_case : List[Any] = [x[0] for x in conv_layers]
__snake_case : Dict = [x[1] for x in conv_layers]
__snake_case : Tuple = [x[2] for x in conv_layers]
__snake_case : List[str] = """gelu"""
__snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__snake_case : Optional[int] = 0.0
__snake_case : Optional[Any] = fs_config.activation_fn.name
__snake_case : Dict = fs_config.encoder_embed_dim
__snake_case : Dict = 0.02
__snake_case : Any = fs_config.encoder_ffn_embed_dim
__snake_case : Tuple = 1E-5
__snake_case : Dict = fs_config.encoder_layerdrop
__snake_case : Any = fs_config.encoder_attention_heads
__snake_case : int = fs_config.conv_pos_groups
__snake_case : Tuple = fs_config.conv_pos
__snake_case : Optional[int] = len(_lowerCamelCase )
__snake_case : int = fs_config.encoder_layers
__snake_case : Optional[int] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__snake_case : Union[str, Any] = model.cfg
__snake_case : Tuple = fs_config.final_dropout
__snake_case : Tuple = fs_config.layerdrop
__snake_case : Any = fs_config.activation_dropout
__snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__snake_case : Tuple = fs_config.attention_dropout
__snake_case : List[Any] = fs_config.dropout_input
__snake_case : Optional[Any] = fs_config.dropout
__snake_case : str = fs_config.mask_channel_length
__snake_case : Any = fs_config.mask_channel_prob
__snake_case : int = fs_config.mask_length
__snake_case : str = fs_config.mask_prob
__snake_case : str = """Wav2Vec2FeatureExtractor"""
__snake_case : Dict = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int:
"""simple docstring"""
if is_finetuned:
__snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case : int = convert_config(model[0] , _lowerCamelCase )
__snake_case : Dict = model[0].eval()
__snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
__snake_case : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
__snake_case : str = Dictionary.load(_lowerCamelCase )
            # important: change the bos & pad token ids, since the CTC blank
            # symbol is <pad> and not <s> as in fairseq
__snake_case : Union[str, Any] = target_dict.pad_index
__snake_case : Optional[Any] = target_dict.bos_index
__snake_case : Tuple = target_dict.pad_index
__snake_case : List[str] = target_dict.bos_index
__snake_case : Optional[Any] = target_dict.eos_index
__snake_case : List[str] = len(target_dict.symbols )
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
__snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case : List[str] = SEWForCTC(_lowerCamelCase )
else:
__snake_case : List[str] = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
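# Example invocation (sketch; the script name, paths and checkpoint are
# illustrative):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_small_100k.pt \
#       --pytorch_dump_folder_path ./sew-small-100k \
#       --config_path ./config.json
# Add --is_finetuned (plus --dict_path) when converting a CTC fine-tuned model.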
| 13 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _A ( _SCREAMING_SNAKE_CASE ):
lowercase__: List[str] = '''canine'''
def __init__( self : Tuple , __magic_name__ : List[str]=7_68 , __magic_name__ : Dict=12 , __magic_name__ : Optional[int]=12 , __magic_name__ : Optional[int]=30_72 , __magic_name__ : Any="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Dict=0.1 , __magic_name__ : Any=1_63_84 , __magic_name__ : List[Any]=16 , __magic_name__ : Dict=0.02 , __magic_name__ : Any=1E-12 , __magic_name__ : Tuple=0 , __magic_name__ : int=0xE000 , __magic_name__ : List[Any]=0xE001 , __magic_name__ : Any=4 , __magic_name__ : Optional[int]=4 , __magic_name__ : Union[str, Any]=8 , __magic_name__ : List[str]=1_63_84 , __magic_name__ : Tuple=1_28 , **__magic_name__ : List[Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
__snake_case : Optional[int] = max_position_embeddings
__snake_case : str = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Any = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Dict = initializer_range
__snake_case : Tuple = type_vocab_size
__snake_case : Optional[Any] = layer_norm_eps
# Character config:
__snake_case : Optional[int] = downsampling_rate
__snake_case : Any = upsampling_kernel_size
__snake_case : List[str] = num_hash_functions
__snake_case : List[Any] = num_hash_buckets
__snake_case : Dict = local_transformer_stride
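# Minimal usage sketch (the public class names follow the standard
# `transformers` CANINE API and are an assumption here):
#   from transformers import CanineConfig, CanineModel
#   configuration = CanineConfig()      # defaults mirror google/canine-s
#   model = CanineModel(configuration)  # randomly initialized with that config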
| 365 |
'''simple docstring'''
def is_pentagonal( n ) -> bool:
    """Return True if n is pentagonal, i.e. n = m * (3 * m - 1) / 2 for a positive integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution( limit = 5000 ) -> int:
    """Return the first difference b = P_j - P_i such that both P_i + P_j and b are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
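# Why the is_pentagonal test works (sketch): P_m = m * (3 * m - 1) / 2, so
# solving 3 * m**2 - m - 2 * P = 0 for m gives m = (1 + sqrt(1 + 24 * P)) / 6,
# and P is pentagonal exactly when that m is a positive integer.
# e.g. P = 22: (1 + sqrt(529)) / 6 = 24 / 6 = 4, and P_4 = 4 * 11 / 2 = 22.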
| 13 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class _A :
def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any] = None ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = value
        __snake_case : Node | None = None  # added to make deleting a node easier
__snake_case : Node | None = None
__snake_case : Node | None = None
def __repr__( self : Dict ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class _A :
def __init__( self : Dict , __magic_name__ : List[Any] = None ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = root
def __str__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self.root )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[str] ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
__snake_case : str = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__lowercase ): # If it is the right children
__snake_case : Optional[int] = new_children
else:
__snake_case : Tuple = new_children
else:
__snake_case : Union[str, Any] = new_children
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase__ ( self : Dict ) -> bool:
"""simple docstring"""
return self.root is None
def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] ) -> None:
"""simple docstring"""
__snake_case : Tuple = Node(__lowercase ) # create a new Node
if self.empty(): # if Tree is empty
__snake_case : Optional[int] = new_node # set its root
else: # Tree is not empty
__snake_case : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__snake_case : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
__snake_case : Any = parent_node.left
else:
if parent_node.right is None:
__snake_case : str = new_node
break
else:
__snake_case : Any = parent_node.right
__snake_case : Tuple = parent_node
def lowercase__ ( self : Tuple , *__magic_name__ : Any ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__lowercase )
def lowercase__ ( self : List[Any] , __magic_name__ : Dict ) -> Node | None:
"""simple docstring"""
if self.empty():
            raise IndexError("""Warning: Tree is empty! Please insert values before searching.""" )
else:
__snake_case : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__snake_case : List[Any] = node.left if value < node.value else node.right
return node
def lowercase__ ( self : Tuple , __magic_name__ : int = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
__snake_case : Union[str, Any] = self.root
if not self.empty():
while node.right is not None:
__snake_case : str = node.right
return node
def lowercase__ ( self : Optional[Any] , __magic_name__ : Union[str, Any] = None ) -> Node | None:
"""simple docstring"""
if node is None:
__snake_case : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
__snake_case : Dict = self.root
while node.left is not None:
__snake_case : List[str] = node.left
return node
def lowercase__ ( self : Dict , __magic_name__ : str ) -> None:
"""simple docstring"""
__snake_case : List[Any] = self.search(__lowercase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__lowercase , __lowercase )
elif node.left is None: # Has only right children
self.__reassign_nodes(__lowercase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__lowercase , node.left )
else:
__snake_case : List[Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
__snake_case : Tuple = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase__ ( self : int , __magic_name__ : List[str] ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase__ ( self : str , __magic_name__ : int=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[Any] ) -> None:
"""simple docstring"""
if node:
self.inorder(__lowercase , node.left )
arr.append(node.value )
self.inorder(__lowercase , node.right )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : list[int] = []
self.inorder(__lowercase , __lowercase ) # append all values to list using inorder traversal
return arr[k - 1]
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
if curr_node is not None:
__snake_case : List[str] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _a ( ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__snake_case : Union[str, Any] = BinarySearchTree()
for i in testlist:
t.insert(_lowerCamelCase )
# Prints all the elements of the list in order traversal
print(_lowerCamelCase )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn\'t exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn\'t exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowerCamelCase )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
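# Example of the k-th smallest lookup built on the inorder traversal above
# (sketch; assumes the last method on the tree is the usual find_kth_smallest):
#   t = BinarySearchTree()
#   t.insert(8, 3, 10, 1, 6)
#   t.find_kth_smallest(2, t.root)  # -> 3, since the inorder order is 1, 3, 6, 8, 10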
| 366 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case : int = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered idx as noted above
__snake_case : Optional[Any] = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
__snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case : Dict = output[output != -float("""inf""" )]
__snake_case : Optional[Any] = tf.cast(
tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = 2
__snake_case : str = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Tuple = [[1, 0], [1, 1]]
__snake_case : Union[str, Any] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Dict = 1
__snake_case : int = 2
class _A ( tf.Module ):
def __init__( self : Tuple , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : Union[str, Any] = [[2], [1_02, 1_03]]
__snake_case : Tuple = [[1], [1, 1]]
__snake_case : List[str] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for input_row in range(len(__magic_name__ ) ):
__snake_case : Tuple = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : str = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )
class _A ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
__snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ )
__snake_case , __snake_case : List[Any] = text.pad_model_inputs(
__magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
return self.tokenizer.detokenize(__magic_name__ )
__snake_case : int = CompleteSentenceTransformer()
__snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__snake_case : Tuple = complete_model(__magic_name__ )
__snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ )
keras_model.save(__magic_name__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__snake_case : str = 14
__snake_case : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : int = """Hello, my dog is cute and"""
__snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" )
__snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case : Dict = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : str = """Hugging Face is a technology company based in New York and Paris."""
__snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
__snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : int = bart_model.generate(__magic_name__ ).numpy()
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )
class _A ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
with self.assertRaises(__magic_name__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__magic_name__ , foo="""bar""" )
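# Standalone behavior of tf_top_k_top_p_filtering, exercised at the top of this
# file (sketch): logits outside the filter are set to -inf.
#   logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
#   tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0)
#   # -> [[-inf, -inf, 3.0, 4.0]]; a top_p < 1.0 additionally drops tokens past
#   # the cumulative-probability cutoff, keeping at least min_tokens_to_keep.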
| 13 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__UpperCamelCase = logging.getLogger(__name__)
__UpperCamelCase = {'facebook/bart-base': BartForConditionalGeneration}
__UpperCamelCase = {'facebook/bart-base': BartTokenizer}
def _a ( ) -> str:
"""simple docstring"""
__snake_case : Tuple = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=snake_case_ , default=snake_case_ , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=snake_case_ , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=snake_case_ , default=snake_case_ , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case_ , )
parser.add_argument(
"""--config_name""" , type=snake_case_ , default=snake_case_ , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=snake_case_ , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=snake_case_ , default=snake_case_ , help="""Where to store the final ONNX file.""" )
__snake_case : Dict = parser.parse_args()
return args
def _a ( _lowerCamelCase , _lowerCamelCase="cpu" ) -> Dict:
"""simple docstring"""
__snake_case : str = model_dict[model_name].from_pretrained(snake_case_ ).to(snake_case_ )
__snake_case : Tuple = tokenizer_dict[model_name].from_pretrained(snake_case_ )
if model_name in ["facebook/bart-base"]:
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = None
__snake_case : Any = 0
return huggingface_model, tokenizer
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
model.eval()
__snake_case : int = None
__snake_case : List[Any] = torch.jit.script(BARTBeamSearchGenerator(snake_case_ ) )
with torch.no_grad():
__snake_case : Optional[Any] = """My friends are cool but they eat too many carbs."""
__snake_case : List[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
__snake_case : str = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=snake_case_ , max_length=snake_case_ , early_stopping=snake_case_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
snake_case_ , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case_ , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=snake_case_ , )
logger.info("""Model exported to {}""".format(snake_case_ ) )
__snake_case : Any = remove_dup_initializers(os.path.abspath(snake_case_ ) )
logger.info("""Deduplicated and optimized model written to {}""".format(snake_case_ ) )
__snake_case : Union[str, Any] = onnxruntime.InferenceSession(snake_case_ )
__snake_case : Optional[Any] = ort_sess.run(
snake_case_ , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(snake_case_ ),
"""max_length""": np.array(snake_case_ ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _a ( ) -> int:
"""simple docstring"""
__snake_case : List[Any] = parse_args()
__snake_case : Optional[Any] = 5
__snake_case : Optional[Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
__snake_case : Optional[int] = torch.device(args.device )
__snake_case , __snake_case : Tuple = load_model_tokenizer(args.model_name_or_path , snake_case_ )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(snake_case_ )
if args.max_length:
__snake_case : List[str] = args.max_length
if args.num_beams:
__snake_case : Optional[Any] = args.num_beams
if args.output_file_path:
__snake_case : int = args.output_file_path
else:
__snake_case : int = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
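# A minimal invocation sketch for the script above (the script filename is an
# illustrative assumption; the model id and the default values come from the
# code itself):
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path BART.onnx \
#       --device cpu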
| 367 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None:
"""simple docstring"""
__snake_case : int = len(_lowerCamelCase )
    # If row is equal to the size of the board, it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the current column
        # does not already appear in the board (possible_board), because if it does
        # there is a vertical collision. Then we apply the two formulas we
        # learned before:
#
        # 45º: y - x = b or 45º: row - col = b
# 135º: y + x = b or row + col = b.
#
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call depth_first_search again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
def _a ( _lowerCamelCase ) -> None:
"""simple docstring"""
__snake_case : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print("""""" )
print(len(_lowerCamelCase ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
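# A small standalone check of the two diagonal formulas used by the solver
# above (illustrative only, not part of the original module):
def _shares_diagonal(row_a: int, col_a: int, row_b: int, col_b: int) -> bool:
    # 45º diagonal: row - col is constant; 135º diagonal: row + col is constant
    return row_a - col_a == row_b - col_b or row_a + col_a == row_b + col_b

assert _shares_diagonal(0, 0, 2, 2)  # both on the 45º diagonal where row - col == 0
assert _shares_diagonal(0, 3, 3, 0)  # both on the 135º diagonal where row + col == 3
assert not _shares_diagonal(0, 0, 1, 2)  # no diagonal collision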
| 13 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ) -> str:
"""simple docstring"""
if attention_mask is None:
__snake_case : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__snake_case : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__snake_case : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _A :
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[str]=13 , __magic_name__ : int=7 , __magic_name__ : Any=True , __magic_name__ : int=False , __magic_name__ : List[Any]=99 , __magic_name__ : Optional[int]=16 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Dict=4 , __magic_name__ : Dict=4 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Dict=32 , __magic_name__ : int=2 , __magic_name__ : str=1 , __magic_name__ : List[Any]=0 , __magic_name__ : List[str]=0.02 , ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = seq_length
__snake_case : Any = is_training
__snake_case : str = use_labels
__snake_case : int = vocab_size
__snake_case : Tuple = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[Any] = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Tuple = eos_token_id
__snake_case : Tuple = pad_token_id
__snake_case : Optional[int] = bos_token_id
__snake_case : Optional[int] = initializer_range
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__snake_case : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__snake_case : Optional[int] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__snake_case : str = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> int:
"""simple docstring"""
__snake_case : List[Any] = 20
__snake_case : List[Any] = model_class_name(_UpperCAmelCase )
__snake_case : Any = model.encode(inputs_dict["""input_ids"""] )
__snake_case : List[str] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
)
__snake_case : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__snake_case : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__snake_case : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Tuple = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = 20
__snake_case : Dict = model_class_name(_UpperCAmelCase )
__snake_case : str = model.encode(inputs_dict["""input_ids"""] )
__snake_case : List[str] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
)
__snake_case : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__snake_case : Dict = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : Any = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__snake_case : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Union[str, Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__snake_case : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class _A ( unittest.TestCase ):
lowercase__: Union[str, Any] = 99
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__snake_case : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__snake_case : List[str] = input_ids.shape[0]
__snake_case : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = self._get_config_and_data()
__snake_case : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__snake_case : str = lm_model(input_ids=_UpperCAmelCase )
__snake_case : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__snake_case : Dict = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__snake_case : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__snake_case : List[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__snake_case : int = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__snake_case : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__snake_case : Tuple = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : Union[str, Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__snake_case : str = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _A ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
lowercase__: Union[str, Any] = True
lowercase__: Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__: List[str] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
__snake_case : List[Any] = FlaxBlenderbotSmallModelTester(self )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : Optional[int] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(__magic_name__ : List[Any] , __magic_name__ : List[str]=None , **__magic_name__ : str ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
__snake_case : str = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__snake_case : Tuple = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : Union[str, Any] = model_class(_UpperCAmelCase )
__snake_case : List[Any] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__snake_case : Union[str, Any] = {
            """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
            """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
            """encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__magic_name__ : Any , __magic_name__ : str , __magic_name__ : Tuple ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest("""JIT Enabled""" ):
__snake_case : Union[str, Any] = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__snake_case : Dict = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case : Dict = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__snake_case : Any = np.ones((1, 1) ) * model.config.eos_token_id
__snake_case : List[str] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
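# What shift_tokens_right computes, as a plain-numpy sketch (an assumption-level
# illustration: the flax implementation shifts the target one step to the right,
# places decoder_start_token_id in front, and maps -100 labels back to pads):
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)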
| 368 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase = logging.getLogger(__name__)
class _A ( __lowercase ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
super().__init__(
__magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
__snake_case : List[str] = None
def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__snake_case : List[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case : List[str] = str(distributed_port + 1 )
__snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ )
dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group )
return target_tensor
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ )
return ifname
def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ )
# distributed training
__snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
__snake_case : Tuple = None
if self._is_main():
__snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )]
dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group )
# scatter logic
__snake_case : Optional[int] = question_hidden_states.shape[0]
__snake_case : Optional[Any] = []
__snake_case : Any = []
if self._is_main():
assert len(__magic_name__ ) == world_size
__snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ )
__snake_case , __snake_case : Tuple = torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa )
__snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
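# The gather/scatter choreography above, reduced to a skeleton (illustrative;
# assumes an initialised "gloo" process group and equally shaped per-rank tensors):
#
#   gather_list = [torch.empty_like(x) for _ in range(world_size)] if rank == 0 else None
#   dist.gather(x, dst=0, gather_list=gather_list, group=group)  # every rank -> rank 0
#   # rank 0 retrieves on the concatenated batch, then chunks the result per rank
#   out = torch.empty_like(x)
#   dist.scatter(out, src=0, scatter_list=chunks if rank == 0 else None, group=group)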
| 13 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class _A ( __snake_case ):
lowercase__: List[str] = """xlm-roberta"""
def __init__( self : str , __magic_name__ : Optional[int]=3_05_22 , __magic_name__ : List[str]=7_68 , __magic_name__ : List[Any]=12 , __magic_name__ : List[str]=12 , __magic_name__ : int=30_72 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Dict=0.1 , __magic_name__ : List[str]=5_12 , __magic_name__ : int=2 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : str=1E-12 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : int=0 , __magic_name__ : Dict=2 , __magic_name__ : List[Any]="absolute" , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=None , **__magic_name__ : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__snake_case : str = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : List[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : List[Any] = type_vocab_size
__snake_case : str = initializer_range
__snake_case : List[Any] = layer_norm_eps
__snake_case : Optional[int] = position_embedding_type
__snake_case : Tuple = use_cache
__snake_case : Union[str, Any] = classifier_dropout
class _A ( __snake_case ):
@property
def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 369 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class _A :
lowercase__: str
lowercase__: Optional[str] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.major, self.minor, self.patch
def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return Version(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
return other
raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
try:
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
return self.tuple < other.tuple
def __hash__( self : Any ) -> Any:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return self.version_str
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase )
if not res:
        raise ValueError(F'''Invalid version \'{_lowerCamelCase}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return ".".join(str(_lowerCamelCase ) for v in version_tuple )
| 13 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if isinstance(__lowerCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class _A :
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self : Any , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int=None , **__magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__snake_case , __snake_case )
__snake_case : Union[str, Any] = TFVisionTextDualEncoderModel(__snake_case )
__snake_case : str = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase__ ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict=None , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.get_vision_text_model(__snake_case , __snake_case )
__snake_case : Dict = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case )
__snake_case : int = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Optional[Any]=None , **__magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : str = self.get_vision_text_model(__snake_case , __snake_case )
__snake_case : List[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__snake_case : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__snake_case )
__snake_case : Tuple = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Union[str, Any]=None , **__magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.get_vision_text_model(__snake_case , __snake_case )
__snake_case : str = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case )
__snake_case : List[str] = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case )
__snake_case : str = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case )
__snake_case : Tuple = TFVisionTextDualEncoderModel.from_pretrained(__snake_case )
__snake_case : Tuple = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case )
__snake_case : Optional[Any] = after_output[0].numpy()
__snake_case : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__snake_case , 1E-5 )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , **__magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : Optional[int] = self.get_vision_text_model(__snake_case , __snake_case )
__snake_case : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case )
__snake_case : str = model(
input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=__snake_case )
__snake_case : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case : Any = to_atuple(vision_model.config.image_size )
__snake_case : Optional[Any] = to_atuple(vision_model.config.patch_size )
__snake_case : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case : Tuple = output.text_model_output.attentions
self.assertEqual(len(__snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase__ ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : np.ndarray , __magic_name__ : float ) -> int:
"""simple docstring"""
__snake_case : str = np.abs((a - b) ).max()
self.assertLessEqual(__snake_case , __snake_case , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__snake_case )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__snake_case )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__snake_case )
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : str = self.prepare_config_and_inputs()
self.check_save_load(**__snake_case )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__snake_case )
@slow
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.get_pretrained_model_and_inputs()
__snake_case : Optional[Any] = model_a(**__snake_case )
__snake_case : Optional[Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__snake_case )
__snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(__snake_case )
__snake_case : List[str] = model_a(**__snake_case )
__snake_case : Any = after_outputs[0].numpy()
__snake_case : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__snake_case , 1E-5 )
@require_tf
class _A ( lowerCamelCase_ , unittest.TestCase ):
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__snake_case : int = 13
__snake_case : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__snake_case : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__snake_case : Union[str, Any] = random_attention_mask([batch_size, 4] )
__snake_case : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowercase__ ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : Any = TFViTModel(__snake_case , name="""vision_model""" )
__snake_case : Tuple = TFBertModel(__snake_case , name="""text_model""" )
return vision_model, text_model
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = TFViTModelTester(self )
__snake_case : int = TFBertModelTester(self )
__snake_case : Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
__snake_case : str = bert_model_tester.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = vision_config_and_inputs
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( lowerCamelCase_ , unittest.TestCase ):
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__snake_case : Any = 13
__snake_case : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__snake_case : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__snake_case : Dict = random_attention_mask([batch_size, 4] )
__snake_case : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict=None , **__magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : Optional[int] = self.get_vision_text_model(__snake_case , __snake_case )
__snake_case : int = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case )
__snake_case : List[str] = model(
input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=__snake_case )
__snake_case : List[str] = output.vision_model_output.attentions
self.assertEqual(len(__snake_case ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__snake_case : List[str] = to_atuple(vision_model.config.image_size )
__snake_case : str = to_atuple(vision_model.config.patch_size )
__snake_case : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case : Optional[Any] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case : Tuple = output.text_model_output.attentions
self.assertEqual(len(__snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : List[str] ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = TFDeiTModel(__snake_case , name="""vision_model""" )
__snake_case : Optional[int] = TFRobertaModel(__snake_case , name="""text_model""" )
return vision_model, text_model
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = TFDeiTModelTester(self )
__snake_case : str = TFRobertaModelTester(self )
__snake_case : Dict = vit_model_tester.prepare_config_and_inputs()
__snake_case : Tuple = bert_model_tester.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : str = vision_config_and_inputs
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( lowerCamelCase_ , unittest.TestCase ):
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__snake_case : Optional[Any] = 13
__snake_case : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__snake_case : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__snake_case : Tuple = random_attention_mask([batch_size, 4] )
__snake_case : Any = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = TFCLIPVisionModel(__snake_case , name="""vision_model""" )
__snake_case : Dict = TFBertModel(__snake_case , name="""text_model""" )
return vision_model, text_model
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Dict = TFCLIPVisionModelTester(self )
__snake_case : Tuple = TFBertModelTester(self )
__snake_case : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
__snake_case : Dict = bert_model_tester.prepare_config_and_inputs()
__snake_case , __snake_case : str = vision_config_and_inputs
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__snake_case )
__snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__snake_case : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__snake_case , padding=__snake_case , return_tensors="""np""" )
__snake_case : Optional[Any] = model(**__snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__snake_case : Optional[int] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __snake_case , atol=1E-3 ) )
| 370 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
__snake_case : Tuple = """"""
while len(_lowerCamelCase ) % 3 != 0:
__snake_case : Any = """0""" + bin_string
__snake_case : Tuple = [
bin_string[index : index + 3]
for index in range(len(_lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__snake_case : Tuple = 0
for index, val in enumerate(_lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) )
oct_string += str(_lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
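    # Hand-checked examples for the converter above (illustrative):
    assert _a("1111") == "17"  # 0b1111 == 15 == 0o17
    assert _a("101010101") == "525"  # 0b101010101 == 341 == 0o525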
| 13 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class _A ( _SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : Any ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
def __call__( self : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__snake_case : Dict = 1
__snake_case : List[Any] = self.unet(A_ , A_ ).sample
__snake_case : Optional[Any] = self.scheduler.step(A_ , A_ , A_ ).prev_sample
__snake_case : Optional[Any] = scheduler_output - scheduler_output + torch.ones_like(A_ )
return result
| 371 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__UpperCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__UpperCamelCase = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
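# The lazy-module pattern above defers importing heavy submodules until an
# attribute is first accessed. A stripped-down illustration of the idea (not
# the actual transformers implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)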
| 13 | 0 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _A ( unittest.TestCase ):
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : str = -1
__snake_case : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : int = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case : Union[str, Any] = TextStreamer(__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : Dict = cs.out[:-1]
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : Any = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : Optional[Any] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Dict = tokenizer.decode(greedy_ids[0] )
__snake_case : List[Any] = TextIteratorStreamer(__magic_name__ )
__snake_case : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__snake_case : List[str] = Thread(target=model.generate , kwargs=__magic_name__ )
thread.start()
__snake_case : Dict = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : int = -1
__snake_case : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : Optional[int] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Any = greedy_ids[:, input_ids.shape[1] :]
__snake_case : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case : str = TextStreamer(__magic_name__ , skip_prompt=__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : str = cs.out[:-1]
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""distilgpt2""" )
__snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__magic_name__ )
__snake_case : Any = -1
__snake_case : Union[str, Any] = torch.ones((1, 5) , device=__magic_name__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__snake_case : Any = TextStreamer(__magic_name__ , skip_special_tokens=__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=1 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__snake_case : List[Any] = cs.out[:-1] # Remove the final "\n"
__snake_case : str = tokenizer(__magic_name__ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowercase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : Union[str, Any] = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : str = TextIteratorStreamer(__magic_name__ , timeout=0.001 )
__snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__snake_case : int = Thread(target=model.generate , kwargs=__magic_name__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__magic_name__ ):
__snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
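# Minimal real-world use of TextIteratorStreamer outside a test (the model id
# is an illustrative assumption):
#
#   tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   inputs = tokenizer("Hello", return_tensors="pt")
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for piece in streamer:
#       print(piece, end="", flush=True)  # text arrives as it is generated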
| 350 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
__snake_case : Tuple = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
__snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""]
__snake_case : Any = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
__snake_case : str = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int]=3 , __magic_name__ : List[Any]=32 , __magic_name__ : int=3 , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Optional[int]=[10, 20, 30, 40] , __magic_name__ : Union[str, Any]=[1, 1, 2, 1] , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[str]="relu" , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = parent
__snake_case : List[str] = batch_size
__snake_case : Dict = image_size
__snake_case : Tuple = num_channels
__snake_case : int = embeddings_size
__snake_case : Optional[int] = hidden_sizes
__snake_case : Any = depths
__snake_case : Optional[int] = is_training
__snake_case : Optional[int] = use_labels
__snake_case : Tuple = hidden_act
__snake_case : Dict = num_labels
__snake_case : int = scope
__snake_case : Optional[int] = len(__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Dict = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = RegNetModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Any = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = self.num_labels
__snake_case : List[str] = RegNetForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case : Optional[int] = config_and_inputs
__snake_case : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowercase__: Dict = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__: List[Any] = False
lowercase__: int = False
lowercase__: Tuple = False
lowercase__: Optional[int] = False
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = RegNetModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(__magic_name__ )
__snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Dict = [*signature.parameters.keys()]
__snake_case : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(config=__magic_name__ )
for name, module in model.named_modules():
                if isinstance(__magic_name__ , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str ):
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Any = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
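            # e.g. with the tester defaults (4 stages, image_size=32): 5 hidden states,
            # the first with spatial size 16 x 16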
__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : Union[str, Any] = layer_type
__snake_case : Any = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : List[str] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = RegNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
__snake_case : List[str] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : int = prepare_img()
__snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : List[Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 351 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _A :
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = image_size
__snake_case : Tuple = patch_size
__snake_case : str = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = coordinate_size
__snake_case : List[Any] = shape_size
__snake_case : Tuple = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Optional[Any] = scope
__snake_case : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : List[str] = text_seq_length
__snake_case : str = (image_size // patch_size) ** 2 + 1
__snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length
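        # Worked example with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12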
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__snake_case : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : Union[str, Any] = bbox[i, j, 3]
__snake_case : Union[str, Any] = bbox[i, j, 1]
__snake_case : Any = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : Optional[Any] = bbox[i, j, 2]
__snake_case : Tuple = bbox[i, j, 0]
__snake_case : Optional[Any] = tmp_coordinate
__snake_case : Dict = tf.constant(__magic_name__ )
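        # Hedged aside: the per-element swap loop above has a vectorized numpy equivalent
        # that sorts each (x0, x1) and (y0, y1) coordinate pair, e.g.:
        #
        #     bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)
        #     bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)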
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__snake_case : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
__snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
__snake_case : List[str] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
__snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.num_labels
__snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
__snake_case : Tuple = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = 2
__snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs
__snake_case : List[Any] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__: Union[str, Any] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase__: Dict = False
lowercase__: int = False
lowercase__: Dict = False
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return True
def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict:
"""simple docstring"""
__snake_case : Any = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
__snake_case : Union[str, Any] = {
k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
                __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__magic_name__ ):
                __snake_case : int = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : str = TFLayoutLMvaModelTester(self )
__snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
__snake_case : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = prepared_for_class.pop("""input_ids""" )
__snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__snake_case : str = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
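                        # -100 is the standard ignore index: positions set to it are excluded from the loss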
__snake_case : Dict = -1_00
__snake_case : str = tf.convert_to_tensor(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
__snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys()
__snake_case : Optional[Any] = inspect.signature(model.call ).parameters
__snake_case : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__snake_case : Union[str, Any] = {0: """input_ids"""}
for label_key in label_keys:
__snake_case : int = signature_names.index(__magic_name__ )
__snake_case : Optional[int] = label_key
__snake_case : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__snake_case : List[str] = prepared_for_class[value]
__snake_case : str = tuple(__magic_name__ )
# Send to model
__snake_case : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Tuple = type
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values
__snake_case : Tuple = tf.constant([[1, 2]] )
__snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
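        # input_ids has shape (1, 2); bbox has shape (1, 2, 4): one (x0, y0, x1, y1) box per token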
# forward pass
__snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
# verify the logits
__snake_case : List[str] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
__snake_case : Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = checkpoint
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = vae_state_dict["""encoder.conv_in.weight"""]
__snake_case : str = vae_state_dict["""encoder.conv_in.bias"""]
__snake_case : List[Any] = vae_state_dict["""encoder.conv_out.weight"""]
__snake_case : Dict = vae_state_dict["""encoder.conv_out.bias"""]
__snake_case : List[str] = vae_state_dict["""encoder.norm_out.weight"""]
__snake_case : Tuple = vae_state_dict["""encoder.norm_out.bias"""]
__snake_case : Dict = vae_state_dict["""decoder.conv_in.weight"""]
__snake_case : Union[str, Any] = vae_state_dict["""decoder.conv_in.bias"""]
__snake_case : List[str] = vae_state_dict["""decoder.conv_out.weight"""]
__snake_case : List[Any] = vae_state_dict["""decoder.conv_out.bias"""]
__snake_case : Optional[Any] = vae_state_dict["""decoder.norm_out.weight"""]
__snake_case : Optional[int] = vae_state_dict["""decoder.norm_out.bias"""]
__snake_case : Tuple = vae_state_dict["""quant_conv.weight"""]
__snake_case : List[Any] = vae_state_dict["""quant_conv.bias"""]
__snake_case : Any = vae_state_dict["""post_quant_conv.weight"""]
__snake_case : Union[str, Any] = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
__snake_case : List[Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
__snake_case : int = {
layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
__snake_case : List[Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
__snake_case : str = {
layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_lowerCamelCase )
}
for i in range(_lowerCamelCase ):
__snake_case : List[Any] = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
__snake_case : List[Any] = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.weight''' )
__snake_case : Any = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.bias''' )
__snake_case : Optional[Any] = renew_vae_resnet_paths(_lowerCamelCase )
__snake_case : Tuple = {"""old""": F'''down.{i}.block''', """new""": F'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
__snake_case : str = [key for key in vae_state_dict if """encoder.mid.block""" in key]
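    # the converted VAE mid block always contains exactly two resnets (hence the hard-coded 2 below)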
__snake_case : List[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case : Dict = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
__snake_case : Tuple = renew_vae_resnet_paths(_lowerCamelCase )
__snake_case : List[Any] = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
__snake_case : int = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
__snake_case : int = renew_vae_attention_paths(_lowerCamelCase )
__snake_case : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
conv_attn_to_linear(_lowerCamelCase )
for i in range(_lowerCamelCase ):
__snake_case : Optional[Any] = num_up_blocks - 1 - i
__snake_case : Optional[Any] = [
key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
]
if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
__snake_case : Optional[int] = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.weight'''
]
__snake_case : List[Any] = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.bias'''
]
__snake_case : Tuple = renew_vae_resnet_paths(_lowerCamelCase )
__snake_case : List[Any] = {"""old""": F'''up.{block_id}.block''', """new""": F'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
__snake_case : Tuple = [key for key in vae_state_dict if """decoder.mid.block""" in key]
__snake_case : Optional[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case : Union[str, Any] = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
__snake_case : Dict = renew_vae_resnet_paths(_lowerCamelCase )
__snake_case : str = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
__snake_case : List[Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
__snake_case : List[str] = renew_vae_attention_paths(_lowerCamelCase )
__snake_case : Tuple = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
conv_attn_to_linear(_lowerCamelCase )
return new_checkpoint
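# Illustrative effect of the meta_path renames above (substring substitutions):
#   "down.0.block"  -> "down_blocks.0.resnets"
#   "up.3.block"    -> "up_blocks.0.resnets"   (up blocks are processed in reverse order)
#   "mid.attn_1"    -> "mid_block.attentions.0"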
def _a ( _lowerCamelCase , _lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
    __snake_case : List[str] = requests.get(
        """https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
__snake_case : int = io.BytesIO(r.content )
__snake_case : List[Any] = OmegaConf.load(_lowerCamelCase )
__snake_case : Union[str, Any] = 512
__snake_case : str = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
__snake_case : Any = {}
with safe_open(_lowerCamelCase , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
__snake_case : List[str] = f.get_tensor(_lowerCamelCase )
else:
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )["""state_dict"""]
# Convert the VAE model.
__snake_case : str = create_vae_diffusers_config(_lowerCamelCase , image_size=_lowerCamelCase )
__snake_case : Any = custom_convert_ldm_vae_checkpoint(_lowerCamelCase , _lowerCamelCase )
__snake_case : Optional[Any] = AutoencoderKL(**_lowerCamelCase )
vae.load_state_dict(_lowerCamelCase )
vae.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
__UpperCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
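# Example invocation (the script file name is illustrative; both flags are defined above):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path /path/to/vae.pt \
#         --dump_path ./converted-vae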
| 352 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _A :
def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = patch_size
__snake_case : List[str] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = attention_type
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = scope
__snake_case : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__snake_case : str = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
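        # Worked example with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # num_patches_per_frame = (10 // 2) ** 2 = 25, so seq_length = 2 * 25 + 1 = 51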
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__snake_case : str = self.num_labels
return config
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Any = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
# verify the logits shape
__snake_case : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__: List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: List[str] = False
lowercase__: List[Any] = False
lowercase__: Dict = False
lowercase__: int = False
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = TimesformerModelTester(self )
__snake_case : List[Any] = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__magic_name__ )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length
__snake_case : Tuple = self.model_tester.num_frames
__snake_case : str = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Dict = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Optional[int] = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
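                # e.g. with the tester defaults: seq_len = 51 and num_frames = 2, so each
                # spatial attention map is num_heads x 26 x 26 (25 patches per frame + 1 CLS)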
__snake_case : int = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Optional[int] = True
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ):
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.hidden_states
__snake_case : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__snake_case : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
__magic_name__ )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Dict = prepare_video()
__snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__snake_case : Dict = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if exitstatus == 5:
__snake_case : List[Any] = 0
# Doctest custom flag to ignore output.
__UpperCamelCase = doctest.register_optionflag("IGNORE_RESULT")
__UpperCamelCase = doctest.OutputChecker
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> List[Any]:
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __magic_name__ , __magic_name__ , __magic_name__ )
__UpperCamelCase = CustomOutputChecker
__UpperCamelCase = HfDoctestModule
__UpperCamelCase = HfDocTestParser
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
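# Hedged usage note: _LazyModule defers the heavy imports above until first attribute
# access, e.g. (illustrative):
#
#     from transformers.models.conditional_detr import ConditionalDetrConfig  # imports only the config module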
| 13 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _A ( __lowercase ):
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__magic_name__ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__magic_name__ , """num_attention_heads""" ) )
class _A :
def __init__( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=13 , __magic_name__ : List[str]=32 , __magic_name__ : Optional[int]=2 , __magic_name__ : List[str]=3 , __magic_name__ : Optional[int]=6_40 , __magic_name__ : Dict=4 , __magic_name__ : Tuple="silu" , __magic_name__ : Optional[int]=3 , __magic_name__ : Any=32 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : Any=True , __magic_name__ : str=10 , __magic_name__ : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = parent
__snake_case : List[str] = batch_size
__snake_case : Any = image_size
__snake_case : List[Any] = patch_size
__snake_case : Union[str, Any] = num_channels
__snake_case : Dict = last_hidden_size
__snake_case : Dict = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : int = conv_kernel_size
__snake_case : Tuple = output_stride
__snake_case : str = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : Any = use_labels
__snake_case : str = is_training
__snake_case : int = num_labels
__snake_case : int = initializer_range
__snake_case : int = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
lowercase__: Union[str, Any] = False
lowercase__: Optional[int] = False
lowercase__: int = False
lowercase__: int = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
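# Usage sketch (added for clarity; not part of the original test file). It mirrors the
# image-classification integration test above; the helper name `_demo_mobilevit_classification`
# is ours, and the snippet assumes network access to the `apple/mobilevit-xx-small` checkpoint.
def _demo_mobilevit_classification(image_path: str) -> str:
    from PIL import Image
    from transformers import MobileViTForImageClassification, MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    logits = model(**inputs).logits
    # map the top logit back to a human-readable ImageNet label
    return model.config.id2label[logits.argmax(-1).item()]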
| 354 |
'''
Count the inversions in an array: the pairs (i, j) with i < j and arr[i] > arr[j].
Implemented twice, by brute force in O(n^2) and by divide and conquer in O(n log n).
'''


def count_inversions_bf(arr) -> int:
    """Count inversions by checking every pair."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Return (sorted copy of arr, number of inversions) via divide and conquer."""
    if len(arr) <= 1:
        return arr, 0

    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge the sorted lists P and Q, counting inversions with one element in each."""
    r = []
    i = j = num_inversion = 0

    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main() -> None:
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
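# Worked example (added for clarity): [3, 1, 2] has exactly two inversions,
# (3, 1) and (3, 2). The brute-force counter is O(n^2); the recursive counter
# is O(n log n), since each of the O(log n) recursion levels does O(n) merge
# work in _count_cross_inversions.
#
#     >>> count_inversions_bf([3, 1, 2])
#     2
#     >>> count_inversions_recursive([3, 1, 2])
#     ([1, 2, 3], 2)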
| 13 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
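# Usage sketch (added for clarity; not part of the original test file), mirroring the
# integration test above; the helper name `_demo_flaubert_hidden_states` is ours.
def _demo_flaubert_hidden_states(text: str = "Bonjour !"):
    import torch
    from transformers import FlaubertModel, FlaubertTokenizer

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
    with torch.no_grad():
        # last_hidden_state has shape (batch, seq_len, hidden_size=768 for the base model)
        return model(**tokenizer(text, return_tensors="pt")).last_hidden_state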
| 355 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
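# Usage sketch (added for clarity; not part of the original file): the decorators
# re-exported here gate tests on the local environment, e.g.
#
#     from accelerate.test_utils import require_cuda, slow
#
#     @require_cuda
#     def test_needs_gpu():
#         ...  # only runs when CUDA is available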
| 13 | 0 |
'''simple docstring'''
import os
import sys
import transformers
__UpperCamelCase = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 356 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.canine_tokenizer
__snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Any = self.canine_tokenizer
__snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.canine_tokenizer
__snake_case : Optional[Any] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__snake_case : Any = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : str = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
__snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Optional[int] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__snake_case : List[Any] = chr(0xE007 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE005
__snake_case : Tuple = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
__snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
__snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
__snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Dict = chr(0xE005 )
__snake_case : str = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__snake_case : Tuple = tokenizer.tokenize(__magic_name__ )
__snake_case : Any = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__snake_case : Optional[Any] = 0xE006
__snake_case : List[str] = chr(__magic_name__ )
__snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Any = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Tuple = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE006
__snake_case : int = chr(__magic_name__ )
__snake_case : List[Any] = [new_token_a]
__snake_case : Union[str, Any] = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__snake_case : Any = 0xE007
__snake_case : Any = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
__snake_case : Union[str, Any] = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[str] = """hello world"""
if self.space_between_special_tokens:
__snake_case : Union[str, Any] = """[CLS] hello world [SEP]"""
else:
__snake_case : List[Any] = input
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__snake_case : Dict = """a"""
__snake_case : Tuple = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
__snake_case : Dict = 0xE006
__snake_case : str = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
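# Usage sketch (added for clarity; not part of the original test file). As the
# batch-encoding test above shows, CANINE input ids are Unicode code points plus
# 57344 (0xE000) for [CLS] and 57345 (0xE001) for [SEP]; the helper name
# `_demo_canine_ids` is ours.
def _demo_canine_ids(text: str) -> list:
    from transformers import CanineTokenizer

    tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
    ids = tokenizer(text).input_ids
    # everything between [CLS] and [SEP] is just ord() of each character
    assert ids[1:-1] == [ord(char) for char in text]
    return ids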
| 13 | 0 |
'''Create character-level n-grams from a sentence.'''


def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous substring of length `ngram_size` in `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
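# Example (added for clarity): character trigrams of a short string.
#
#     >>> create_ngram("hello", 3)
#     ['hel', 'ell', 'llo']
#     >>> create_ngram("ab", 3)   # shorter than the window -> no n-grams
#     []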
| 357 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
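# Usage sketch (added for clarity; not part of the original file): these legacy
# helpers back the pre-datasets-library example scripts, e.g.
#
#     from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments
#
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/glue/MRPC")
#     train_dataset = GlueDataset(args, tokenizer=tokenizer)  # tokenizer: any PreTrainedTokenizer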
| 13 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
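# Usage sketch (added for clarity; not part of the original file): single sequences are
# wrapped as `<s> ... </s>` and pairs as `<s> A </s></s> B </s>`, matching
# build_inputs_with_special_tokens above; the helper name `_demo_bart_special_tokens` is ours.
def _demo_bart_special_tokens() -> None:
    from transformers import BartTokenizerFast

    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    ids = tokenizer("hello world").input_ids
    # first id is <s> (bos), last id is </s> (eos)
    assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id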
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class _A ( __lowercase ):
def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
# TODO: how to do that better?
__snake_case : List[str] = 0
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
__snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._config.n_head
def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the inputs in the way they appear in the forward()
__snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case : Tuple = seqlen + 2
__snake_case : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[str] = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
__snake_case : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
__snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__snake_case : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return 13
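# A minimal sketch of the dummy past_key_values built in generate_dummy_inputs
# above; the sizes here are assumptions chosen to mirror the config defaults:
import torch
batch, seqlen = 2, 8
n_layer, n_head, hidden = 4, 16, 1024
past_len = seqlen + 2  # deliberately different from seqlen, as in the code above
shape = (batch, n_head, past_len, hidden // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
# the attention mask is then extended with past_len ones along the time axis:
mask = torch.cat([torch.ones(batch, seqlen), torch.ones(batch, past_len)], dim=1)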
| 13 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _A ( unittest.TestCase ):
def __init__( self : Optional[int] , __magic_name__ : Any , __magic_name__ : Tuple=13 , __magic_name__ : str=7 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : int=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=99 , __magic_name__ : List[str]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : int="gelu" , __magic_name__ : str=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Dict=5_12 , __magic_name__ : List[Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.02 , __magic_name__ : Optional[Any]=4 , ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : Any = seq_length
__snake_case : Optional[int] = is_training
__snake_case : str = use_attention_mask
__snake_case : List[str] = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : Optional[Any] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : int = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : int = type_sequence_label_size
__snake_case : str = initializer_range
__snake_case : Any = num_choices
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = None
if self.use_attention_mask:
__snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[int] = None
if self.use_token_type_ids:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = config_and_inputs
__snake_case : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : str = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : int = config_and_inputs
__snake_case : str = True
__snake_case : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = True
lowercase__: List[str] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case : Optional[Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ )
__snake_case : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ )
__snake_case : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__snake_case : List[Any] = model(__magic_name__ )[0]
__snake_case : Optional[Any] = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , __magic_name__ )
# compare the actual values for a slice.
__snake_case : List[Any] = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ )
__snake_case : Union[str, Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__snake_case : Dict = model(__magic_name__ )[0]
# compare the actual values for a slice.
__snake_case : Dict = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
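# The slice-comparison pattern the two integration tests above rely on, shown in
# isolation (the arrays here are placeholders, not real model outputs):
import numpy as np
output = np.zeros((1, 11, 50265), dtype=np.float32)
expected_slice = output[:, :3, :3].copy()
assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)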
| 359 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = KandinskyImgaImgPipeline
lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowercase__: int = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowercase__: List[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase__: Any = False
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Tuple = MultilingualCLIP(__magic_name__ )
__snake_case : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__snake_case : Tuple = UNetaDConditionModel(**__magic_name__ )
return model
@property
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : Dict = self.dummy_tokenizer
__snake_case : Dict = self.dummy_unet
__snake_case : int = self.dummy_movq
__snake_case : List[Any] = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__snake_case : Dict = DDIMScheduler(**__magic_name__ )
__snake_case : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str:
"""simple docstring"""
__snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
__snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : str = torch.manual_seed(__magic_name__ )
else:
__snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : Dict = """cpu"""
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : List[str] = self.pipeline_class(**__magic_name__ )
__snake_case : Optional[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
__snake_case : List[str] = output.images
__snake_case : Any = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__snake_case : List[Any] = """A red cartoon frog, 4k"""
__snake_case : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
__snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
__snake_case : Any = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case , __snake_case : Optional[Any] = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__snake_case : List[str] = pipeline(
__magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
__snake_case : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
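# Isolated sketch of the tensor-to-PIL conversion used in get_dummy_inputs above
# (the * 255 scaling is an addition here so the demo image is not near-black):
import numpy as np
import torch
from PIL import Image
tensor = torch.rand(1, 3, 64, 64)  # NCHW float tensor in [0, 1]
array = tensor.cpu().permute(0, 2, 3, 1)[0].numpy()  # -> HWC
init_image = Image.fromarray(np.uint8(array * 255)).convert("RGB").resize((256, 256))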
| 13 | 0 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _A ( __lowercase , __lowercase ):
lowercase__: int = 1
@register_to_config
def __init__( self : Dict , __magic_name__ : int = 10_00 , __magic_name__ : Optional[Union[np.ndarray, List[float]]] = None ) -> int:
"""simple docstring"""
self.set_timesteps(__magic_name__ )
# standard deviation of the initial noise distribution
__snake_case : List[str] = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formulas (9), (12), (13) and Algorithm 2.
__snake_case : int = 4
# running values
__snake_case : Any = []
def lowercase__ ( self : List[Any] , __magic_name__ : int , __magic_name__ : Union[str, torch.device] = None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = num_inference_steps
__snake_case : List[str] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__snake_case : List[Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__snake_case : Optional[Any] = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__snake_case : Tuple = torch.sin(steps * math.pi / 2 ) ** 2
__snake_case : List[str] = (1.0 - self.betas**2) ** 0.5
__snake_case : Any = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__snake_case : List[Any] = timesteps.to(__magic_name__ )
__snake_case : str = []
def lowercase__ ( self : Tuple , __magic_name__ : torch.FloatTensor , __magic_name__ : int , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__snake_case : Optional[Any] = (self.timesteps == timestep).nonzero().item()
__snake_case : List[str] = timestep_index + 1
__snake_case : Optional[int] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__magic_name__ )
if len(self.ets ) == 1:
__snake_case : Optional[Any] = self.ets[-1]
elif len(self.ets ) == 2:
__snake_case : Optional[Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__snake_case : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__snake_case : Optional[int] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__snake_case : Dict = self._get_prev_sample(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : torch.FloatTensor , *__magic_name__ : Any , **__magic_name__ : Tuple ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase__ ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.alphas[timestep_index]
__snake_case : int = self.betas[timestep_index]
__snake_case : Union[str, Any] = self.alphas[prev_timestep_index]
__snake_case : str = self.betas[prev_timestep_index]
__snake_case : Dict = (sample - sigma * ets) / max(__magic_name__ , 1E-8 )
__snake_case : List[str] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
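# The step() method above is a linear multistep update; this is the same
# Adams-Bashforth-style coefficient schedule on plain floats, in isolation:
def multistep_eps(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
assert multistep_eps([1.0, 2.0, 3.0]) == 3.5  # (23*3 - 16*2 + 5*1) / 12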
| 360 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCamelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__UpperCamelCase = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class _A ( __lowercase ):
lowercase__: Any = VOCAB_FILES_NAMES
lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask''']
lowercase__: List[str] = BartTokenizer
def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]:
"""simple docstring"""
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
__snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
__snake_case : str = add_prefix_space
__snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ )
__snake_case : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case : Any = """post_processor"""
__snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
__snake_case : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Tuple = tuple(state["""sep"""] )
if "cls" in state:
__snake_case : int = tuple(state["""cls"""] )
__snake_case : Optional[int] = False
if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : Optional[Any] = add_prefix_space
__snake_case : List[str] = True
if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets:
__snake_case : Optional[int] = trim_offsets
__snake_case : Any = True
if changes_to_apply:
__snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) )
__snake_case : List[Any] = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
__snake_case : Union[str, Any] = value
def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding:
"""simple docstring"""
__snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding:
"""simple docstring"""
__snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
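# De-obfuscated sketch of the special-token layout the pair of methods above
# produce (bos=0 and eos=2 are the usual BART ids, stated here as assumptions):
def build_inputs(token_ids_0, token_ids_1=None, bos=0, eos=2):
    output = [bos] + token_ids_0 + [eos]
    if token_ids_1 is None:
        return output
    return output + [eos] + token_ids_1 + [eos]
assert build_inputs([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]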
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase ) -> list[int]:
"""simple docstring"""
__snake_case : Any = [True] * limit
__snake_case : int = False
__snake_case : str = False
__snake_case : Dict = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__snake_case : str = i * 2
while index < limit:
__snake_case : int = False
__snake_case : List[str] = index + i
__snake_case : Dict = [2]
for i in range(3 , _lowerCamelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCamelCase )
return primes
def _a ( _lowerCamelCase = 100_0000 ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = prime_sieve(_lowerCamelCase )
__snake_case : Dict = 0
__snake_case : Union[str, Any] = 0
for i in range(len(_lowerCamelCase ) ):
for j in range(i + length , len(_lowerCamelCase ) ):
__snake_case : Optional[Any] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__snake_case : Optional[int] = j - i
__snake_case : int = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
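# Self-contained, de-obfuscated sketch of the search above (Project Euler 50:
# the longest run of consecutive primes whose sum is itself a prime below the
# ceiling); a trial-division sieve stands in for prime_sieve:
def longest_prime_sum(ceiling):
    primes = [p for p in range(2, ceiling) if all(p % d for d in range(2, int(p**0.5) + 1))]
    best, length = 0, 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            s = sum(primes[i:j])
            if s >= ceiling:
                break
            if s in primes:
                length, best = j - i, s
    return best
assert longest_prime_sum(100) == 41  # 2 + 3 + 5 + 7 + 11 + 13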
| 361 |
'''simple docstring'''
import os
import numpy
import onnx
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = a.name
__snake_case : Dict = b.name
__snake_case : Optional[int] = """"""
__snake_case : int = """"""
__snake_case : Any = a == b
__snake_case : List[Any] = name_a
__snake_case : List[str] = name_b
return res
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_lowerCamelCase , _lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = list(model.graph.initializer )
__snake_case : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case : Tuple = inits[i].name
__snake_case : Tuple = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : str = os.path.dirname(_lowerCamelCase )
__snake_case : Dict = os.path.basename(_lowerCamelCase )
__snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) )
__snake_case : Dict = list(model.graph.initializer )
__snake_case : Optional[int] = set()
__snake_case : Optional[Any] = {}
__snake_case : Tuple = []
__snake_case : List[Any] = 0
for i in range(len(_lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_lowerCamelCase )
dup_set.add(_lowerCamelCase )
__snake_case : List[Any] = inits[j].data_type
__snake_case : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , _lowerCamelCase )
total_reduced_size += mem_size
__snake_case : Any = inits[i].name
__snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCamelCase )
else:
__snake_case : Dict = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
__snake_case : int = sorted(_lowerCamelCase )
_remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case : str = """optimized_""" + model_file_name
__snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
onnx.save(_lowerCamelCase , _lowerCamelCase )
return new_model
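# Isolated sketch of the memory estimate in the dedup pass above; the dtype
# codes are ONNX TensorProto values: 1 (FLOAT, 4 bytes), 6 (INT32, 4 bytes),
# 7 (INT64) and 11 (DOUBLE), both 8 bytes:
import numpy
BYTES_PER_DTYPE = {1: 4, 6: 4, 7: 8, 11: 8}
def tensor_bytes(dims, dtype):
    return int(numpy.prod(dims)) * BYTES_PER_DTYPE.get(dtype, 1)
assert tensor_bytes((1024, 1024), 1) == 4 * 1024 * 1024  # a 1024x1024 float32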
| 13 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> None:
"""simple docstring"""
__snake_case : Optional[int] = generate_pascal_triangle(_lowerCamelCase )
for row_idx in range(_lowerCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def _a ( _lowerCamelCase ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__snake_case : list[list[int]] = []
for current_row_idx in range(_lowerCamelCase ):
__snake_case : List[str] = populate_current_row(_lowerCamelCase , _lowerCamelCase )
triangle.append(_lowerCamelCase )
return triangle
def _a ( _lowerCamelCase , _lowerCamelCase ) -> list[int]:
"""simple docstring"""
__snake_case : Optional[Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__snake_case , __snake_case : List[str] = 1, 1
for current_col_idx in range(1 , _lowerCamelCase ):
calculate_current_element(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return current_row
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None:
"""simple docstring"""
__snake_case : List[str] = triangle[current_row_idx - 1][current_col_idx - 1]
__snake_case : Optional[Any] = triangle[current_row_idx - 1][current_col_idx]
__snake_case : str = above_to_left_elt + above_to_right_elt
def _a ( _lowerCamelCase ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__snake_case : list[list[int]] = [[1]]
for row_index in range(1 , _lowerCamelCase ):
__snake_case : Optional[int] = [0] + result[-1] + [0]
__snake_case : Tuple = row_index + 1
# Calculate the number of distinct elements in a row
__snake_case : Tuple = sum(divmod(_lowerCamelCase , 2 ) )
__snake_case : Union[str, Any] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__snake_case : List[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__snake_case : Dict = row_first_half + row_second_half
result.append(_lowerCamelCase )
return result
def _a ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase , _lowerCamelCase ) -> None:
__snake_case : Tuple = F'''{func.__name__}({value})'''
__snake_case : str = timeit(F'''__main__.{call}''' , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
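# Isolated sketch of the symmetry trick used by the optimized builder above:
# each Pascal row is a palindrome, so only ceil(n/2) sums are computed and the
# rest is a reflected copy of the first half.
def next_pascal_row(prev):
    temp = [0] + prev + [0]
    row_length = len(prev) + 1
    distinct = (row_length + 1) // 2  # same as sum(divmod(row_length, 2))
    first = [temp[i - 1] + temp[i] for i in range(1, distinct + 1)]
    return first + list(reversed(first[: row_length // 2]))
assert next_pascal_row([1, 3, 3, 1]) == [1, 4, 6, 4, 1]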
| 362 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase )
__snake_case : Optional[int] = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__UpperCamelCase = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
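# Isolated sketch of the rename the conversion above performs: the tied decoder
# weight is popped out of the DialoGPT state dict and stored under the HF name
# (the keys are the ones hard-coded above; the tensor here is a placeholder):
import torch
state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert list(state_dict) == ["lm_head.weight"]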
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
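# Minimal sketch of the availability-gated import pattern above, in isolation
# (a plain ImportError stands in for OptionalDependencyNotAvailable here):
_demo_import_structure = {"configuration_vivit": ["VivitConfig"]}
try:
    import torch  # noqa: F401
except ImportError:
    pass
else:
    _demo_import_structure["modeling_vivit"] = ["VivitModel", "VivitPreTrainedModel"]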
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
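# Self-contained round-trip sketch of the Vigenère scheme implemented above:
# non-letters pass through unchanged and do not advance the key index, so
# decrypting an encrypted message with the same key restores the original.
DEMO_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def vigenere(message, key, sign):
    out, k = [], 0
    for ch in message:
        n = DEMO_LETTERS.find(ch.upper())
        if n == -1:
            out.append(ch)
            continue
        n = (n + sign * DEMO_LETTERS.find(key[k % len(key)].upper())) % 26
        out.append(DEMO_LETTERS[n] if ch.isupper() else DEMO_LETTERS[n].lower())
        k += 1
    return "".join(out)
assert vigenere(vigenere("Attack at dawn", "LEMON", 1), "LEMON", -1) == "Attack at dawn"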
| 13 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: List[Any] = StableDiffusionPanoramaPipeline
lowercase__: Dict = TEXT_TO_IMAGE_PARAMS
lowercase__: Dict = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__snake_case : Optional[int] = DDIMScheduler()
torch.manual_seed(0 )
__snake_case : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__snake_case : Optional[Any] = CLIPTextModel(__magic_name__ )
__snake_case : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__snake_case : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , __magic_name__ : Any , __magic_name__ : List[str]=0 ) -> int:
"""simple docstring"""
__snake_case : str = torch.manual_seed(__magic_name__ )
__snake_case : List[Any] = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : List[str] = self.get_dummy_components()
__snake_case : List[str] = StableDiffusionPanoramaPipeline(**__magic_name__ )
__snake_case : Any = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ )
__snake_case : Union[str, Any] = sd_pipe(**__magic_name__ ).images
__snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : Union[str, Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[Any] = self.get_dummy_components()
__snake_case : Optional[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ )
__snake_case : str = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ )
__snake_case : Optional[int] = """french fries"""
__snake_case : Optional[int] = sd_pipe(**__magic_name__ , negative_prompt=__magic_name__ )
__snake_case : Optional[Any] = output.images
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : str = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : int = self.get_dummy_components()
__snake_case : Dict = StableDiffusionPanoramaPipeline(**__magic_name__ )
__snake_case : Optional[int] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : int = self.get_dummy_inputs(__magic_name__ )
__snake_case : Optional[Any] = sd_pipe(**__magic_name__ , view_batch_size=2 )
__snake_case : Optional[int] = output.images
__snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : Any = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__snake_case : List[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ )
__snake_case : List[str] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Tuple = self.get_dummy_inputs(__magic_name__ )
__snake_case : List[Any] = sd_pipe(**__magic_name__ ).images
__snake_case : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
__snake_case : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : str = self.get_dummy_components()
__snake_case : str = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=__magic_name__ )
__snake_case : Any = StableDiffusionPanoramaPipeline(**__magic_name__ )
__snake_case : List[Any] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ )
__snake_case : Optional[int] = sd_pipe(**__magic_name__ ).images
__snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : Optional[int] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict , __magic_name__ : Optional[int]=0 ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = torch.manual_seed(__magic_name__ )
__snake_case : Dict = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = """stabilityai/stable-diffusion-2-base"""
__snake_case : List[str] = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
__snake_case : Dict = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
__snake_case : Any = self.get_inputs()
__snake_case : int = pipe(**__magic_name__ ).images
__snake_case : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__snake_case : Union[str, Any] = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=__magic_name__ )
__snake_case : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
__snake_case : Tuple = self.get_inputs()
__snake_case : Tuple = pipe(**__magic_name__ ).images
__snake_case : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__snake_case : Optional[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : List[str] = 0
def callback_fn(__magic_name__ : int , __magic_name__ : int , __magic_name__ : torch.FloatTensor ) -> None:
__snake_case : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__snake_case : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__snake_case : Optional[int] = latents[0, -3:, -3:, -1]
__snake_case : List[str] = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__snake_case : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__snake_case : int = latents[0, -3:, -3:, -1]
__snake_case : List[str] = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__snake_case : Optional[Any] = False
__snake_case : List[Any] = """stabilityai/stable-diffusion-2-base"""
__snake_case : Any = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
__snake_case : Any = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
__snake_case : List[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
__snake_case : List[Any] = self.get_inputs()
pipe(**__magic_name__ , callback=__magic_name__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case : Tuple = """stabilityai/stable-diffusion-2-base"""
__snake_case : int = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
__snake_case : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
__snake_case : int = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__snake_case : Union[str, Any] = self.get_inputs()
__snake_case : Union[str, Any] = pipe(**__magic_name__ )
__snake_case : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
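# Isolated sketch of the peak-memory check in the last test above: reset the
# CUDA stats, run the workload, then read back the high-water mark (guarded so
# it is a no-op on CPU-only machines; the 5.5 GB bound is the original's):
import torch
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.zeros(1024, 1024, device="cuda")
    assert torch.cuda.max_memory_allocated() < 5.5 * 10**9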
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for attribute in key.split(""".""" ):
__snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case : Union[str, Any] = value
elif weight_type == "weight_g":
__snake_case : str = value
elif weight_type == "weight_v":
__snake_case : Tuple = value
elif weight_type == "bias":
__snake_case : str = value
else:
__snake_case : List[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = []
__snake_case : List[Any] = fairseq_model.state_dict()
__snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Any = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__snake_case : Dict = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
__snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase )
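                        # e.g. name "encoder.layers.3.self_attn.k_proj.weight" matches key
                        # "self_attn.k_proj"; the layer index "3" is extracted and substituted
                        # for the "*" wildcard in the mapped key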
if "weight_g" in name:
__snake_case : Dict = """weight_g"""
elif "weight_v" in name:
__snake_case : List[str] = """weight_v"""
elif "weight" in name:
__snake_case : str = """weight"""
elif "bias" in name:
__snake_case : int = """bias"""
else:
__snake_case : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Dict = full_name.split("""conv_layers.""" )[-1]
__snake_case : Optional[int] = name.split(""".""" )
__snake_case : Dict = int(items[0] )
__snake_case : Optional[Any] = int(items[1] )
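    # e.g. full_name "...conv_layers.0.2.weight" yields name "0.2.weight", so
    # layer_id=0 and type_id=2 (type_id 0 -> conv weight/bias, type_id 2 -> layer norm)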
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = SEWConfig()
if is_finetuned:
__snake_case : List[Any] = model.wav_encoder.wav_model.cfg
else:
__snake_case : Optional[Any] = model.cfg
__snake_case : Tuple = fs_config.conv_bias
__snake_case : List[Any] = eval(fs_config.conv_feature_layers )
__snake_case : List[Any] = [x[0] for x in conv_layers]
__snake_case : Dict = [x[1] for x in conv_layers]
__snake_case : Tuple = [x[2] for x in conv_layers]
__snake_case : List[str] = """gelu"""
__snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__snake_case : Optional[int] = 0.0
__snake_case : Optional[Any] = fs_config.activation_fn.name
__snake_case : Dict = fs_config.encoder_embed_dim
__snake_case : Dict = 0.02
__snake_case : Any = fs_config.encoder_ffn_embed_dim
__snake_case : Tuple = 1E-5
__snake_case : Dict = fs_config.encoder_layerdrop
__snake_case : Any = fs_config.encoder_attention_heads
__snake_case : int = fs_config.conv_pos_groups
__snake_case : Tuple = fs_config.conv_pos
__snake_case : Optional[int] = len(_lowerCamelCase )
__snake_case : int = fs_config.encoder_layers
__snake_case : Optional[int] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__snake_case : Union[str, Any] = model.cfg
__snake_case : Tuple = fs_config.final_dropout
__snake_case : Tuple = fs_config.layerdrop
__snake_case : Any = fs_config.activation_dropout
__snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__snake_case : Tuple = fs_config.attention_dropout
__snake_case : List[Any] = fs_config.dropout_input
__snake_case : Optional[Any] = fs_config.dropout
__snake_case : str = fs_config.mask_channel_length
__snake_case : Any = fs_config.mask_channel_prob
__snake_case : int = fs_config.mask_length
__snake_case : str = fs_config.mask_prob
__snake_case : str = """Wav2Vec2FeatureExtractor"""
__snake_case : Dict = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int:
"""simple docstring"""
if is_finetuned:
__snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case : int = convert_config(model[0] , _lowerCamelCase )
__snake_case : Dict = model[0].eval()
__snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
__snake_case : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
__snake_case : str = Dictionary.load(_lowerCamelCase )
            # IMPORTANT: change the bos & pad token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq
__snake_case : Union[str, Any] = target_dict.pad_index
__snake_case : Optional[Any] = target_dict.bos_index
__snake_case : Tuple = target_dict.pad_index
__snake_case : List[str] = target_dict.bos_index
__snake_case : Optional[Any] = target_dict.eos_index
__snake_case : List[str] = len(target_dict.symbols )
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
__snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case : List[str] = SEWForCTC(_lowerCamelCase )
else:
__snake_case : List[str] = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 13 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
__UpperCamelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = tmp_path_factory.getbasetemp() / """cache"""
__snake_case : List[str] = test_hf_cache_home / """datasets"""
__snake_case : Union[str, Any] = test_hf_cache_home / """metrics"""
__snake_case : Any = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(_lowerCamelCase ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(_lowerCamelCase ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(_lowerCamelCase ) )
__snake_case : List[str] = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(_lowerCamelCase ) )
__snake_case : str = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_lowerCamelCase ) )
@pytest.fixture(autouse=_lowerCamelCase , scope="""session""" )
def _a ( ) -> List[Any]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , _lowerCamelCase )
@pytest.fixture
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , _lowerCamelCase )
| 365 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : Optional[int] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _a ( _lowerCamelCase = 5000 ) -> int:
"""simple docstring"""
__snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )]
for i, pentagonal_i in enumerate(_lowerCamelCase ):
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case : Optional[int] = pentagonal_nums[j]
__snake_case : str = pentagonal_i + pentagonal_j
__snake_case : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
__snake_case : int = number_of_bytes // partitions
__snake_case : int = []
for i in range(_lowerCamelCase ):
__snake_case : str = i * bytes_per_partition + 1
__snake_case : str = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
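    # Illustrative example (added): splitting 100 bytes across 4 partitions yields
    # inclusive, 1-based byte ranges of 25 bytes each.
    print(_a(100, 4))  # ['1-25', '26-50', '51-75', '76-100']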
| 366 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case : int = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
__snake_case : Optional[Any] = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
__snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case : Dict = output[output != -float("""inf""" )]
__snake_case : Optional[Any] = tf.cast(
tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = 2
__snake_case : str = 2
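        # the exported serving signature fixes the sequence length (input_length) but
        # leaves the batch dimension dynamic, so the saved model accepts any batch size
        # of length-2 inputs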
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Tuple = [[1, 0], [1, 1]]
__snake_case : Union[str, Any] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Dict = 1
__snake_case : int = 2
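        # here the signature fixes the batch size instead and leaves the sequence
        # length dynamic, exercising the complementary export path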
class _A ( tf.Module ):
def __init__( self : Tuple , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : Union[str, Any] = [[2], [1_02, 1_03]]
__snake_case : Tuple = [[1], [1, 1]]
__snake_case : List[str] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for input_row in range(len(__magic_name__ ) ):
__snake_case : Tuple = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : str = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )
class _A ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
__snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ )
__snake_case , __snake_case : List[Any] = text.pad_model_inputs(
__magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
return self.tokenizer.detokenize(__magic_name__ )
__snake_case : int = CompleteSentenceTransformer()
__snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__snake_case : Tuple = complete_model(__magic_name__ )
__snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ )
keras_model.save(__magic_name__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__snake_case : str = 14
__snake_case : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : int = """Hello, my dog is cute and"""
__snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" )
__snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case : Dict = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : str = """Hugging Face is a technology company based in New York and Paris."""
__snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
__snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : int = bart_model.generate(__magic_name__ ).numpy()
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )
class _A ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
with self.assertRaises(__magic_name__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__magic_name__ , foo="""bar""" )
| 13 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = IFInpaintingSuperResolutionPipeline
lowercase__: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowercase__: Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
lowercase__: List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : int=0 ) -> Optional[int]:
"""simple docstring"""
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : Any = torch.manual_seed(__magic_name__ )
else:
__snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 367 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None:
"""simple docstring"""
__snake_case : int = len(_lowerCamelCase )
    # If row is equal to the size of the board, it means there is a queen in each row
    # of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible results for that row
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
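        # e.g. queens at (row=1, col=3) and (row=2, col=2) collide on the 135º diagonal
        # because 1 + 3 == 2 + 2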
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
def _a ( _lowerCamelCase ) -> None:
"""simple docstring"""
__snake_case : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print("""""" )
print(len(_lowerCamelCase ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 13 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self : Dict , __magic_name__ : Optional[int] , __magic_name__ : int=13 , __magic_name__ : Dict=32 , __magic_name__ : List[str]=2 , __magic_name__ : int=3 , __magic_name__ : int=16 , __magic_name__ : str=[32, 64, 1_28] , __magic_name__ : Union[str, Any]=[1, 2, 1] , __magic_name__ : Any=[2, 2, 4] , __magic_name__ : List[Any]=2 , __magic_name__ : List[str]=2.0 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : int="gelu" , __magic_name__ : int=False , __magic_name__ : int=True , __magic_name__ : Union[str, Any]=0.02 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : Any=True , __magic_name__ : int=None , __magic_name__ : str=True , __magic_name__ : int=10 , __magic_name__ : List[str]=8 , __magic_name__ : Optional[Any]=["stage1", "stage2"] , __magic_name__ : Tuple=[1, 2] , ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = parent
__snake_case : str = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[str] = patch_size
__snake_case : Tuple = num_channels
__snake_case : Optional[Any] = embed_dim
__snake_case : str = hidden_sizes
__snake_case : str = depths
__snake_case : str = num_heads
__snake_case : Dict = window_size
__snake_case : List[str] = mlp_ratio
__snake_case : Dict = qkv_bias
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : List[Any] = drop_path_rate
__snake_case : List[str] = hidden_act
__snake_case : Union[str, Any] = use_absolute_embeddings
__snake_case : Tuple = patch_norm
__snake_case : Union[str, Any] = layer_norm_eps
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = is_training
__snake_case : Union[str, Any] = scope
__snake_case : Union[str, Any] = use_labels
__snake_case : Any = type_sequence_label_size
__snake_case : Any = encoder_stride
__snake_case : List[str] = out_features
__snake_case : Optional[Any] = out_indices
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : int ) -> Dict:
"""simple docstring"""
__snake_case : Dict = FocalNetModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Union[str, Any] = model(__magic_name__ )
__snake_case : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Dict ) -> Any:
"""simple docstring"""
__snake_case : str = FocalNetBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : str = model(__magic_name__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__snake_case : Optional[Any] = None
__snake_case : str = FocalNetBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = FocalNetForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__snake_case : List[Any] = 1
__snake_case : Tuple = FocalNetForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = self.type_sequence_label_size
__snake_case : Optional[int] = FocalNetForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Dict = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : Optional[int] = 1
__snake_case : Optional[Any] = FocalNetForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case : Tuple = config_and_inputs
__snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__: List[str] = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__: int = False
lowercase__: str = False
lowercase__: List[Any] = False
lowercase__: Optional[int] = False
lowercase__: Optional[Any] = False
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = FocalNetModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 , has_text_modality=__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Optional[int] = model_class(__magic_name__ )
__snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.hidden_states
__snake_case : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# FocalNet has a different seq_length
__snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : Dict = reshaped_hidden_states[0].shape
__snake_case : List[str] = (
reshaped_hidden_states[0].view(__magic_name__ , __magic_name__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : Tuple = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : int = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = 3
__snake_case : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
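        # pad height and width so they land on a patch-size boundary (a full extra
        # patch is added even when the dimension is already aligned)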
for model_class in self.all_model_classes[:-1]:
__snake_case : List[Any] = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = FocalNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__magic_name__ )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Optional[int] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__: str = FocalNetConfig
lowercase__: Dict = False
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = FocalNetModelTester(self )
| 368 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase = logging.getLogger(__name__)
class _A ( __lowercase ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
super().__init__(
__magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
__snake_case : List[str] = None
def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__snake_case : List[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case : List[str] = str(distributed_port + 1 )
__snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ )
dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group )
return target_tensor
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ )
return ifname
def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ )
# distributed training
__snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
        # gather logic: collect every worker's query states on the main worker
__snake_case : Tuple = None
if self._is_main():
__snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )]
dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group )
        # scatter logic: send each worker its slice of the retrieved results
__snake_case : Optional[int] = question_hidden_states.shape[0]
__snake_case : Optional[Any] = []
__snake_case : Any = []
if self._is_main():
assert len(__magic_name__ ) == world_size
__snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ )
__snake_case , __snake_case : Tuple = torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa )
__snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
| 13 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
__UpperCamelCase = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
__UpperCamelCase = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
__UpperCamelCase = soup.find("meta", {"property": "og:image"})["content"]
__UpperCamelCase = requests.get(image_url).content
__UpperCamelCase = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 369 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class _A :
lowercase__: str
lowercase__: Optional[str] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.major, self.minor, self.patch
def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return Version(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
return other
raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
try:
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
return self.tuple < other.tuple
def __hash__( self : Any ) -> Any:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return self.version_str
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return ".".join(str(_lowerCamelCase ) for v in version_tuple )
| 13 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 370 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
__snake_case : Tuple = """"""
while len(_lowerCamelCase ) % 3 != 0:
__snake_case : Any = """0""" + bin_string
__snake_case : Tuple = [
bin_string[index : index + 3]
for index in range(len(_lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__snake_case : Tuple = 0
for index, val in enumerate(_lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) )
oct_string += str(_lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
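    # Illustrative example (added): each group of three bits maps to one octal digit,
    # so "1111" is zero-padded to "001 111" and converts to octal "17".
    print(_a("1111"))  # 17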
| 13 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "sentencepiece.model"}
__UpperCamelCase = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
__UpperCamelCase = {
"google/rembert": 256,
}
class _A ( __lowercase ):
lowercase__: Optional[Any] = VOCAB_FILES_NAMES
lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model)
    def get_vocab( self ) -> dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self ) -> dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize( self , text , sample=False ) -> List[str]:
        """simple docstring"""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
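# A minimal usage sketch (assumes a local SentencePiece model file; the class name `_A`
# follows this snippet's naming):
#
#   tokenizer = _A("sentencepiece.model")
#   ids = tokenizer.build_inputs_with_special_tokens([5, 6, 7])
#   # -> [cls_id, 5, 6, 7, sep_id], mirroring the "[CLS] tokens [SEP]" layout above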
| 371 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
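# Note on the pattern above (a sketch of the intent, not extra functionality):
# swapping sys.modules[__name__] for a _LazyModule defers the heavy torch/TF/Flax
# imports until an attribute is actually accessed, e.g. (assumed package path):
#
#   from transformers.models.mt5 import MT5Config  # triggers only the config import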
| 13 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , seq_length=10_24 , num_of_sequences=10_24 , chars_per_token=3.6 ) -> None:
        """simple docstring"""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        """simple docstring"""
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0 , len(all_token_ids) , self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
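    # Illustration of the packing above (made-up values): with concat_token_id = 0 and
    # seq_length = 4, tokenized buffers [[5, 6], [7, 8, 9]] are flattened to
    # [5, 6, 0, 7, 8, 9, 0]; only the full window [5, 6, 0, 7] is yielded and the
    # trailing partial chunk is dropped.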
def create_dataloader( args ) -> DataLoader:
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size)
    return eval_dataloader
def evaluate( args ):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch , labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
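# Worked example of the perplexity computation above: perplexity = exp(mean loss),
# so a mean cross-entropy of about 2.3026 corresponds to a perplexity of about 10:
#
#   torch.exp(torch.tensor(2.3026))  # -> tensor(10.0003)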
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 350 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self ):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 7_68))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__UpperCamelCase = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__UpperCamelCase = BASE_URL + "/user"
# https://github.com/settings/tokens
__UpperCamelCase = os.environ.get("USER_TOKEN", "")
def fetch_github_info( auth_token ) -> dict[Any, Any]:
    """simple docstring"""
    headers = {
        "Authorization": F'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 351 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
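        # Worked example with the defaults above: image_size=4 and patch_size=2 give
        # (4 // 2) ** 2 + 1 = 5 visual tokens, so seq_length = 7 + 5 = 12.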
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        """simple docstring"""
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids , training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({"pixel_values": pixel_values} , training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class _A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
        """simple docstring"""
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> dict:
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v , tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32)
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37)
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_loss_computation( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model , "hf_compute_loss" , None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids , **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_00
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids , **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask)
    def test_model_various_embeddings( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask)
    def test_for_sequence_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels)
    def test_for_token_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels)
    def test_for_question_answering( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels)
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class _A ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0)
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False)
        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4))
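    # Shape sanity check for the assertion above (a derivation, not extra test logic):
    # with a 224x224 input and 16x16 patches there are (224 // 16) ** 2 = 196 visual
    # tokens, plus 1 CLS token and the 2 text tokens in `input_ids`, giving 199.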
| 13 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
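    # Worked example with the defaults above: image_size=32, patch_size=2 gives
    # (32 // 2) ** 2 = 256 patches; after len(depths) - 1 = 2 merge stages this is
    # 256 // 4 ** 2 = 16 tokens, with hidden dim = 16 * 2 ** 2 = 64.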
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        """simple docstring"""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) , expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
    def test_hidden_states_output_with_padding( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowercase__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
pass
    def test_model_outputs_equivalence( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs)
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs).to_tuple()
                def recursive_check(tuple_object , dict_object):
                    if isinstance(tuple_object , (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif isinstance(tuple_object , Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values()):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object) , set_nan_tensor_to_zero(dict_object) , atol=1E-5) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f''' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'''
                                f''' {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has'''
                                f''' `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.'''
                            ) , )
                recursive_check(tuple_output , dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
@require_torch
class _A ( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 352 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester :
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
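    # Worked example with the defaults above: image_size=10, patch_size=2 gives
    # (10 // 2) ** 2 = 25 patches per frame, so with num_frames=2 the sequence
    # length is 2 * 25 + 1 = 51 (the +1 is the CLS token).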
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape , expected_shape)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37)
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs( self ):
        """simple docstring"""
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(out_len + 1 , len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ):
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.hidden_states
__snake_case : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__snake_case : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
__magic_name__ )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Dict = prepare_video()
__snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
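# Editor's note (illustrative, not part of the original file): the @slow
# integration test above is skipped by default; transformers only runs it when
# slow tests are enabled, e.g.
#   RUN_SLOW=1 pytest tests/models/timesformer/test_modeling_timesformer.py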
| 13 | 0 |
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
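# Editor's sketch (assumed usage, not part of the test file): outside of tests,
# the scheduler is typically swapped into a diffusion pipeline via from_config.
# The checkpoint name below is only an example.
#
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)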
| 353 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """
    Return the number of reduced proper fractions with denominator d <= limit,
    i.e. the sum of Euler's totient function phi(d) for 2 <= d <= limit.
    """
    # sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * prod(1 - 1/p) over the prime divisors p of n
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
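# Editor's check (added, hedged): for denominators d <= 8 there are 21 reduced
# proper fractions (Project Euler problem 72's worked example), so:
assert solution(8) == 21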
| 354 |
def count_inversions_bf(arr):
    """Count inversions with a brute-force O(n^2) double loop."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n); returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
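# Editor's example (added, not part of the original): both implementations agree
# on a minimal case -- [3, 2, 1] contains exactly the three inversions
# (3, 2), (3, 1) and (2, 1).
def _editor_smoke_test() -> None:
    assert count_inversions_bf([3, 2, 1]) == 3
    assert count_inversions_recursive([3, 2, 1])[1] == 3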
| 13 | 0 |
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
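# --- Editor's addition (hypothetical, not in the original file) ---
# A minimal sketch of the matching server this client expects: it listens on the
# same host/port, receives the greeting, streams a file back in 1024-byte
# chunks, then closes. The file name "File_to_send" is an assumption chosen for
# illustration only.
def example_server() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # b"Hello server!"
    with open("File_to_send", "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()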
| 355 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
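# Editor's note (illustrative, not part of the original file): _LazyModule defers
# the heavy imports declared in _import_structure until first attribute access,
# so `from transformers.models.vit import ViTModel` only imports modeling_vit
# (and therefore torch) at that point.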
| 356 |
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    # ("b" and "B" for example have different Unicode code points)
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
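# Editor's note (illustrative, not part of the original file): the code points
# used for the custom special tokens above (0xE005-0xE007) sit in Unicode's
# Basic Multilingual Plane private use area (0xE000-0xF8FF), so they can never
# clash with real characters in the input text; CANINE's own [CLS]/[SEP] ids in
# the expected tokens, 57344 and 57345, are 0xE000 and 0xE001 from the same range.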
| 13 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
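# Editor's note (illustrative): ShapEPipeline renders each generated 3D asset
# from multiple camera angles, which is why both tests above assert a leading
# dimension of 20 frames per prompt, i.e. (20, frame_size, frame_size, 3).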
| 357 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
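# Editor's addition (hypothetical usage, not in the original module): a small
# self-check of the helpers above on values verified by hand.
if __name__ == "__main__":
    assert is_prime(97)
    assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert goldbach(28) == [5, 23]
    assert gcd(24, 36) == 12
    assert kg_v(8, 10) == 40
    assert is_perfect_number(28)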
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class _A ( __lowercase ):
lowercase__: str = '''codegen'''
lowercase__: Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
__snake_case : List[str] = vocab_size
__snake_case : Union[str, Any] = n_ctx
__snake_case : int = n_positions
__snake_case : str = n_embd
__snake_case : Dict = n_layer
__snake_case : List[Any] = n_head
__snake_case : Any = n_inner
__snake_case : str = rotary_dim
__snake_case : List[str] = activation_function
__snake_case : Tuple = resid_pdrop
__snake_case : Dict = embd_pdrop
__snake_case : int = attn_pdrop
__snake_case : Tuple = layer_norm_epsilon
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = use_cache
__snake_case : Dict = bos_token_id
__snake_case : Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ )
class _A ( __lowercase ):
def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
# TODO: how to do that better?
__snake_case : List[str] = 0
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
__snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._config.n_head
def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
__snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case : Tuple = seqlen + 2
__snake_case : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[str] = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
__snake_case : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
__snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__snake_case : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
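    # Illustrative shape check for the dummy past_key_values built above (the numbers are
    # picked from this config's defaults, not stated in the original file): with batch=2,
    # n_head=16, seqlen=3 and hidden_size (n_embd) = 4096, past_key_values_length is
    # 3 + 2 = 5, so each of the n_layer (key, value) pairs is a pair of zero tensors of
    # shape (2, 16, 5, 4096 // 16) == (2, 16, 5, 256).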
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return 13
| 13 | 0 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = RoFormerTokenizer
lowercase__: List[Any] = RoFormerTokenizerFast
lowercase__: Any = True
lowercase__: Optional[int] = True
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
def lowercase__ ( self : str , **__magic_name__ : Any ) -> List[Any]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__magic_name__ )
def lowercase__ ( self : Dict , **__magic_name__ : Optional[Any] ) -> int:
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__magic_name__ )
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case : str = """永和服装饰品有限公司,今天天气非常好"""
__snake_case : str = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : List[str] = self.get_chinese_input_output_texts()
__snake_case : Any = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , output_text.split() )
__snake_case : Union[str, Any] = tokens + [tokenizer.unk_token]
__snake_case : Any = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : Tuple = self.get_rust_tokenizer()
__snake_case : int = self.get_chinese_input_output_texts()
__snake_case : Union[str, Any] = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , output_text.split() )
__snake_case : Dict = tokens + [tokenizer.unk_token]
__snake_case : str = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
| 359 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = KandinskyImgaImgPipeline
lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowercase__: int = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowercase__: List[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase__: Any = False
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Tuple = MultilingualCLIP(__magic_name__ )
__snake_case : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__snake_case : Tuple = UNetaDConditionModel(**__magic_name__ )
return model
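    # Note on the UNet config above (restating the inline comment, not new behavior):
    # out_channels is 2 * in_channels because the model predicts a mean and a variance
    # for each of the 4 latent channels, giving 8 output channels.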
@property
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : Dict = self.dummy_tokenizer
__snake_case : Dict = self.dummy_unet
__snake_case : int = self.dummy_movq
__snake_case : List[Any] = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__snake_case : Dict = DDIMScheduler(**__magic_name__ )
__snake_case : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str:
"""simple docstring"""
__snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
__snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : str = torch.manual_seed(__magic_name__ )
else:
__snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : Dict = """cpu"""
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : List[str] = self.pipeline_class(**__magic_name__ )
__snake_case : Optional[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
__snake_case : List[str] = output.images
__snake_case : Any = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__snake_case : List[Any] = """A red cartoon frog, 4k"""
__snake_case : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
__snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
__snake_case : Any = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case , __snake_case : Optional[Any] = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__snake_case : List[str] = pipeline(
__magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
__snake_case : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 13 | 0 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
__snake_case : Union[str, Any] = iter(_lowerCamelCase )
while True:
__snake_case : str = tuple(itertools.islice(_lowerCamelCase , _lowerCamelCase ) )
if not chunk:
return
yield chunk
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
__snake_case : List[Any] = """"""
if len(_lowerCamelCase ) < 2:
return dirty
for i in range(len(_lowerCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_lowerCamelCase ) & 1:
clean += "X"
return clean
def _a ( _lowerCamelCase ) -> list[str]:
"""simple docstring"""
__snake_case : List[str] = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__snake_case : Optional[Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowerCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowerCamelCase )
return table
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Dict = generate_table(_lowerCamelCase )
__snake_case : int = prepare_input(_lowerCamelCase )
__snake_case : List[Any] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char_a, char_b in chunker(_lowerCamelCase , 2 ):
        __snake_case , __snake_case : Union[str, Any] = divmod(table.index(char_a) , 5 )
        __snake_case , __snake_case : Optional[Any] = divmod(table.index(char_b) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else: # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
return ciphertext
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = generate_table(_lowerCamelCase )
__snake_case : Tuple = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char_a, char_b in chunker(_lowerCamelCase , 2 ):
        __snake_case , __snake_case : List[str] = divmod(table.index(char_a) , 5 )
        __snake_case , __snake_case : List[str] = divmod(table.index(char_b) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else: # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
return plaintext
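# Hypothetical round trip (all defs in this file are mangled to `_a`; the names
# below assume the original Playfair module called them `encode`, `decode` and
# `prepare_input`):
#
#   key, message = "monarchy", "Hide the gold"
#   assert decode(encode(message, key), key) == prepare_input(message)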
| 360 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCamelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__UpperCamelCase = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class _A ( __lowercase ):
lowercase__: Any = VOCAB_FILES_NAMES
lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask''']
lowercase__: List[str] = BartTokenizer
def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]:
"""simple docstring"""
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
__snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
__snake_case : str = add_prefix_space
__snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ )
__snake_case : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case : Any = """post_processor"""
__snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
__snake_case : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Tuple = tuple(state["""sep"""] )
if "cls" in state:
__snake_case : int = tuple(state["""cls"""] )
__snake_case : Optional[int] = False
if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : Optional[Any] = add_prefix_space
__snake_case : List[str] = True
if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets:
__snake_case : Optional[int] = trim_offsets
__snake_case : Any = True
if changes_to_apply:
__snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) )
__snake_case : List[Any] = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
__snake_case : Union[str, Any] = value
def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding:
"""simple docstring"""
__snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding:
"""simple docstring"""
__snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
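    # Illustrative result of the method above (BART's standard format): a single
    # sequence becomes `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`.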
def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 13 | 0 |
'''simple docstring'''
from math import ceil
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = list(range(0 , _lowerCamelCase ) )
__snake_case : Dict = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__snake_case : Dict = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
__snake_case : Union[str, Any] = [i for i in blocks if i not in device_map_blocks]
__snake_case : Tuple = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = list(range(_lowerCamelCase ) )
__snake_case : int = int(ceil(n_layers / len(_lowerCamelCase ) ) )
__snake_case : Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , _lowerCamelCase , _lowerCamelCase )]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
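# Worked example for the block-splitting above (values are illustrative): with
# n_layers=8 and devices=[0, 1, 2], n_blocks = ceil(8 / 3) = 3, so the returned
# device map is {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}.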
| 361 |
'''simple docstring'''
import os
import numpy
import onnx
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = a.name
__snake_case : Dict = b.name
__snake_case : Optional[int] = """"""
__snake_case : int = """"""
__snake_case : Any = a == b
__snake_case : List[Any] = name_a
__snake_case : List[str] = name_b
return res
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_lowerCamelCase , _lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = list(model.graph.initializer )
__snake_case : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case : Tuple = inits[i].name
__snake_case : Tuple = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : str = os.path.dirname(_lowerCamelCase )
__snake_case : Dict = os.path.basename(_lowerCamelCase )
__snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) )
__snake_case : Dict = list(model.graph.initializer )
__snake_case : Optional[int] = set()
__snake_case : Optional[Any] = {}
__snake_case : Tuple = []
__snake_case : List[Any] = 0
for i in range(len(_lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_lowerCamelCase )
dup_set.add(_lowerCamelCase )
__snake_case : List[Any] = inits[j].data_type
__snake_case : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , _lowerCamelCase )
total_reduced_size += mem_size
__snake_case : Any = inits[i].name
__snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCamelCase )
else:
__snake_case : Dict = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
__snake_case : int = sorted(_lowerCamelCase )
_remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case : str = """optimized_""" + model_file_name
__snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
onnx.save(_lowerCamelCase , _lowerCamelCase )
return new_model
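# Illustrative sizing for the dtype branches above: ONNX data_type 1 (FLOAT) and
# 6 (INT32) take 4 bytes per element, while 7 (INT64) and 11 (DOUBLE) take 8;
# removing a duplicated float32 initializer of shape (1024, 1024) therefore saves
# 1024 * 1024 * 4 bytes, i.e. 4 MiB.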
| 13 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 362 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase )
__snake_case : Optional[int] = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__UpperCamelCase = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 13 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase = 3 , _lowerCamelCase = 7 , _lowerCamelCase = 100_0000 ) -> int:
"""simple docstring"""
__snake_case : Dict = 0
__snake_case : Tuple = 1
for current_denominator in range(1 , limit + 1 ):
__snake_case : List[str] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__snake_case : int = current_numerator
__snake_case : int = current_denominator
return max_numerator
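# Worked example (this matches the Project Euler 71 statement): with numerator=3,
# denominator=7 and limit=8, the fraction immediately to the left of 3/7 is 2/5,
# so the function returns 2.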
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Optional[Any] = 0
if end is None:
__snake_case : int = len(_lowerCamelCase ) - 1
if start >= end:
return
__snake_case : str = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for attribute in key.split(""".""" ):
__snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case : Union[str, Any] = value
elif weight_type == "weight_g":
__snake_case : str = value
elif weight_type == "weight_v":
__snake_case : Tuple = value
elif weight_type == "bias":
__snake_case : str = value
else:
__snake_case : List[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = []
__snake_case : List[Any] = fairseq_model.state_dict()
__snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Any = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__snake_case : Dict = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
__snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
__snake_case : Dict = """weight_g"""
elif "weight_v" in name:
__snake_case : List[str] = """weight_v"""
elif "weight" in name:
__snake_case : str = """weight"""
elif "bias" in name:
__snake_case : int = """bias"""
else:
__snake_case : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
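# Illustrative key mapping (following the logic above; the weight name is an
# example, not taken from the original file): a fairseq weight
# "encoder.layers.3.self_attn.k_proj.weight" matches the MAPPING key
# "self_attn.k_proj"; the layer index "3" is recovered via
# name.split(key)[0].split(".")[-2] and substituted for "*", giving
# "encoder.layers.3.attention.k_proj" (prefixed with "sew." when fine-tuned)
# with weight_type "weight".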
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Dict = full_name.split("""conv_layers.""" )[-1]
__snake_case : Optional[int] = name.split(""".""" )
__snake_case : Dict = int(items[0] )
__snake_case : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = SEWConfig()
if is_finetuned:
__snake_case : List[Any] = model.wav_encoder.wav_model.cfg
else:
__snake_case : Optional[Any] = model.cfg
__snake_case : Tuple = fs_config.conv_bias
__snake_case : List[Any] = eval(fs_config.conv_feature_layers )
__snake_case : List[Any] = [x[0] for x in conv_layers]
__snake_case : Dict = [x[1] for x in conv_layers]
__snake_case : Tuple = [x[2] for x in conv_layers]
__snake_case : List[str] = """gelu"""
__snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__snake_case : Optional[int] = 0.0
__snake_case : Optional[Any] = fs_config.activation_fn.name
__snake_case : Dict = fs_config.encoder_embed_dim
__snake_case : Dict = 0.02
__snake_case : Any = fs_config.encoder_ffn_embed_dim
__snake_case : Tuple = 1E-5
__snake_case : Dict = fs_config.encoder_layerdrop
__snake_case : Any = fs_config.encoder_attention_heads
__snake_case : int = fs_config.conv_pos_groups
__snake_case : Tuple = fs_config.conv_pos
__snake_case : Optional[int] = len(_lowerCamelCase )
__snake_case : int = fs_config.encoder_layers
__snake_case : Optional[int] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__snake_case : Union[str, Any] = model.cfg
__snake_case : Tuple = fs_config.final_dropout
__snake_case : Tuple = fs_config.layerdrop
__snake_case : Any = fs_config.activation_dropout
__snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__snake_case : Tuple = fs_config.attention_dropout
__snake_case : List[Any] = fs_config.dropout_input
__snake_case : Optional[Any] = fs_config.dropout
__snake_case : str = fs_config.mask_channel_length
__snake_case : Any = fs_config.mask_channel_prob
__snake_case : int = fs_config.mask_length
__snake_case : str = fs_config.mask_prob
__snake_case : str = """Wav2Vec2FeatureExtractor"""
__snake_case : Dict = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int:
"""simple docstring"""
if is_finetuned:
__snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case : int = convert_config(model[0] , _lowerCamelCase )
__snake_case : Dict = model[0].eval()
__snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
__snake_case : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
__snake_case : str = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Union[str, Any] = target_dict.pad_index
__snake_case : Optional[Any] = target_dict.bos_index
__snake_case : Tuple = target_dict.pad_index
__snake_case : List[str] = target_dict.bos_index
__snake_case : Optional[Any] = target_dict.eos_index
__snake_case : List[str] = len(target_dict.symbols )
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
__snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case : List[str] = SEWForCTC(_lowerCamelCase )
else:
__snake_case : List[str] = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : Optional[int] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
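# Quick check of the inverse formula above: for n = 22, root = (1 + 24 * 22) ** 0.5
# = 529 ** 0.5 = 23 and (1 + 23) / 6 = 4.0, an integer, so 22 is the 4th pentagonal
# number and the function returns True.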
def _a ( _lowerCamelCase = 5000 ) -> int:
"""simple docstring"""
__snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )]
for i, pentagonal_i in enumerate(_lowerCamelCase ):
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case : Optional[int] = pentagonal_nums[j]
__snake_case : str = pentagonal_i + pentagonal_j
__snake_case : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 13 | 0 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCamelCase = 16
__UpperCamelCase = 32
def _a ( _lowerCamelCase , _lowerCamelCase = 16 ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__snake_case : int = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__snake_case : Any = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__snake_case : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__snake_case : str = 16
elif accelerator.mixed_precision != "no":
__snake_case : int = 8
else:
__snake_case : List[str] = None
return tokenizer.pad(
_lowerCamelCase , padding="""longest""" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="""pt""" , )
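    # Illustrative effect of the pad_to_multiple_of rule above: under fp16 a batch
    # whose longest sequence has 61 tokens is padded to 64 (the next multiple of 8),
    # which keeps tensor shapes friendly to mixed-precision kernels.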
# Instantiate dataloaders.
__snake_case : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase , drop_last=_lowerCamelCase )
__snake_case : List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case : List[str] = config["""lr"""]
__snake_case : Optional[int] = int(config["""num_epochs"""] )
__snake_case : List[Any] = int(config["""seed"""] )
__snake_case : int = int(config["""batch_size"""] )
__snake_case : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__snake_case : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__snake_case : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
__snake_case : Dict = MAX_GPU_BATCH_SIZE
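    # E.g. (illustrative numbers): a requested batch_size of 64 on GPU becomes
    # gradient_accumulation_steps = 64 // 16 = 4 with a per-step batch of
    # MAX_GPU_BATCH_SIZE = 16, so the effective batch size is unchanged.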
set_seed(_lowerCamelCase )
__snake_case : Optional[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__snake_case : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
__snake_case : str = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
__snake_case : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case : Optional[int] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__snake_case : int = model(**_lowerCamelCase )
__snake_case : Optional[int] = outputs.loss
__snake_case : str = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__snake_case : Dict = model(**_lowerCamelCase )
__snake_case : int = outputs.logits.argmax(dim=-1 )
__snake_case : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
__snake_case : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowerCamelCase )
def _a ( ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__snake_case : str = parser.parse_args()
__snake_case : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 366 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__snake_case : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case : Dict = output[output != -float("""inf""" )]
__snake_case : Optional[Any] = tf.cast(
tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
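    # What the test above exercises: tf_top_k_top_p_filtering keeps only logits that
    # survive both the top_k=10 and the nucleus top_p=0.6 filters (never fewer than
    # min_tokens_to_keep=4) and masks everything else to -inf; the annotated entries
    # in the input tensor are exactly the expected survivors.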
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = 2
__snake_case : str = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Tuple = [[1, 0], [1, 1]]
__snake_case : Union[str, Any] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Dict = 1
__snake_case : int = 2
class _A ( tf.Module ):
def __init__( self : Tuple , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : Union[str, Any] = [[2], [1_02, 1_03]]
__snake_case : Tuple = [[1], [1, 1]]
__snake_case : List[str] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for input_row in range(len(__magic_name__ ) ):
__snake_case : Tuple = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : str = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )
class _A ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
__snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ )
__snake_case , __snake_case : List[Any] = text.pad_model_inputs(
__magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
return self.tokenizer.detokenize(__magic_name__ )
__snake_case : int = CompleteSentenceTransformer()
__snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__snake_case : Tuple = complete_model(__magic_name__ )
__snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ )
keras_model.save(__magic_name__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__snake_case : str = 14
__snake_case : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : int = """Hello, my dog is cute and"""
__snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" )
__snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case : Dict = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : str = """Hugging Face is a technology company based in New York and Paris."""
__snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
__snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : int = bart_model.generate(__magic_name__ ).numpy()
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )
class _A ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
with self.assertRaises(__magic_name__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__magic_name__ , foo="""bar""" )
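
# --- Illustrative sketch of the SavedModel export pattern that the two XLA
# generation tests above exercise, without the test harness. The checkpoint
# name matches the tiny model used in the tests; the wrapper class and the
# output path are my own choices.
import tensorflow as tf
from transformers import TFAutoModelForCausalLM


class ExportableGenerator(tf.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    @tf.function(
        input_signature=(
            tf.TensorSpec((None, 2), tf.int32, name="input_ids"),
            tf.TensorSpec((None, 2), tf.int32, name="attention_mask"),
        ),
        jit_compile=True,
    )
    def serving(self, input_ids, attention_mask):
        outputs = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_new_tokens=2,
            return_dict_in_generate=True,
        )
        return {"sequences": outputs["sequences"]}


def export_and_reload_sketch(path: str = "/tmp/exported_generator"):
    model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    wrapper = ExportableGenerator(model)
    tf.saved_model.save(wrapper, path, signatures={"serving_default": wrapper.serving})
    return tf.saved_model.load(path).signatures["serving_default"]
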
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _A ( __lowercase , __lowercase ):
lowercase__: Union[str, Any] = '''dinat'''
lowercase__: str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Tuple , __magic_name__ : str=4 , __magic_name__ : str=3 , __magic_name__ : int=64 , __magic_name__ : Tuple=[3, 4, 6, 5] , __magic_name__ : Any=[2, 4, 8, 16] , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __magic_name__ : Tuple=3.0 , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.02 , __magic_name__ : Union[str, Any]=1E-5 , __magic_name__ : str=0.0 , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=None , **__magic_name__ : int , ) -> str:
"""simple docstring"""
super().__init__(**__magic_name__ )
__snake_case : Optional[int] = patch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = embed_dim
__snake_case : List[str] = depths
__snake_case : List[Any] = len(__magic_name__ )
__snake_case : Optional[Any] = num_heads
__snake_case : List[Any] = kernel_size
__snake_case : Tuple = dilations
__snake_case : List[Any] = mlp_ratio
__snake_case : Optional[int] = qkv_bias
__snake_case : str = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = drop_path_rate
__snake_case : str = hidden_act
__snake_case : Tuple = layer_norm_eps
__snake_case : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case : str = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
__snake_case : str = layer_scale_init_value
__snake_case : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__magic_name__ ) + 1 )]
__snake_case : str = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
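
# --- Illustrative usage sketch for the config defined above. `DinatConfig`
# is the real (de-obfuscated) name of this class in transformers; the values
# below are the defaults, so the derived attributes can be checked directly.
from transformers import DinatConfig

dinat_config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
# hidden_size is derived as embed_dim * 2 ** (num_stages - 1) = 64 * 2 ** 3
assert dinat_config.hidden_size == 512
assert dinat_config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
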
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, pruning colliding columns and diagonals."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the column is not
        # already used in the current board (possible_board); a repeated column means
        # a vertical collision. Then we apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we call the DFS function again with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Find every solution to the n-queens puzzle and print the boards."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
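
# --- Illustrative check: collecting boards without printing them lets us
# verify the search against the known N-queens solution counts
# (1, 0, 0, 2, 10, 4, 40, 92 for n = 1..8).
def count_n_queens_solutions(n: int) -> int:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


assert [count_n_queens_solutions(n) for n in range(1, 9)] == [1, 0, 0, 2, 10, 4, 40, 92]
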
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _A ( __lowercase ):
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : int = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
__snake_case : Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
__snake_case : int = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__snake_case : List[str] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowercase__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__snake_case : Any = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
import PIL.Image
__snake_case : List[Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__magic_name__ ) as mock_cast_to_python_objects:
__snake_case : Dict = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
__snake_case : Tuple = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __magic_name__ )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = pa.BufferReader(_lowerCamelCase ) if isinstance(_lowerCamelCase , pa.Buffer ) else pa.memory_map(_lowerCamelCase )
__snake_case : int = pa.ipc.open_stream(_lowerCamelCase )
__snake_case : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : int = pa.BufferOutputStream()
__snake_case : Optional[int] = pa.schema(_lowerCamelCase ) if fields else None
with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a ( ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = pa.BufferOutputStream()
__snake_case : Dict = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_lowerCamelCase , features=_lowerCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
__snake_case : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__snake_case : Optional[Any] = pa.BufferReader(output.getvalue() )
__snake_case : Optional[int] = pa.ipc.open_stream(_lowerCamelCase )
__snake_case : pa.Table = f.read_all()
__snake_case : Optional[int] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_lowerCamelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : int = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer:
with pytest.raises(_lowerCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
__snake_case : Tuple = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer:
with pytest.raises(_lowerCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
__snake_case : List[str] = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : int = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
__snake_case : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = pa.BufferOutputStream()
__snake_case : Any = pa.schema(_lowerCamelCase ) if fields else None
with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
__snake_case : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : int = pa.BufferOutputStream()
__snake_case : List[str] = pa.schema(_lowerCamelCase ) if fields else None
with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
__snake_case : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : str = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = pa.BufferOutputStream()
__snake_case : str = pa.schema(_lowerCamelCase ) if fields else None
with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
__snake_case : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : Optional[int] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a ( ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
__snake_case : List[str] = os.path.join(_lowerCamelCase , """test.arrow""" )
with ArrowWriter(path=_lowerCamelCase , schema=pa.schema(_lowerCamelCase ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
__snake_case : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata )
_check_output(_lowerCamelCase , 1 )
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
if pa.types.is_list(_lowerCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
if isinstance(lst[0] , _lowerCamelCase ):
change_first_primitive_element_in_list(lst[0] , _lowerCamelCase )
else:
__snake_case : int = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = pa.array(TypedSequence(_lowerCamelCase , optimized_int_type=_lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = pa.array(OptimizedTypedSequence(_lowerCamelCase , col=_lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__snake_case : Optional[int] = copy.deepcopy(_lowerCamelCase )
__snake_case : Dict = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_lowerCamelCase , _lowerCamelCase )
__snake_case : Dict = pa.array(OptimizedTypedSequence(_lowerCamelCase , col=_lowerCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : int = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_lowerCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : int = """mock://dataset-train.arrow"""
with ArrowWriter(path=_lowerCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_lowerCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : str = pa.BufferOutputStream()
with ParquetWriter(stream=_lowerCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__snake_case : str = pa.BufferReader(output.getvalue() )
__snake_case : pa.Table = pq.read_table(_lowerCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
import PIL.Image
__snake_case : Optional[Any] = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_lowerCamelCase , format="""png""" )
__snake_case : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=_lowerCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=_lowerCamelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
__snake_case : Any = pa.BufferReader(output.getvalue() )
__snake_case : pa.Table = pq.read_table(_lowerCamelCase )
__snake_case : Union[str, Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _lowerCamelCase )
with open(_lowerCamelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _a ( ) -> str:
"""simple docstring"""
__snake_case : List[Any] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_lowerCamelCase )] )
__snake_case : Dict = pa.BufferOutputStream()
with ArrowWriter(stream=_lowerCamelCase ) as writer:
writer._build_writer(inferred_schema=_lowerCamelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
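
# --- Illustrative sketch: the minimal ArrowWriter round trip that most of the
# tests above build on -- write two examples to an in-memory buffer, finalize,
# and read them back with pyarrow's IPC stream reader.
def _arrow_writer_roundtrip_sketch():
    stream = pa.BufferOutputStream()
    with ArrowWriter(stream=stream) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2 and num_bytes > 0

    table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
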
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase = logging.getLogger(__name__)
class _A ( __lowercase ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
super().__init__(
__magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
__snake_case : List[str] = None
def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__snake_case : List[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case : List[str] = str(distributed_port + 1 )
__snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ )
dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group )
return target_tensor
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ )
return ifname
def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ )
# distributed training
__snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
__snake_case : Tuple = None
if self._is_main():
__snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )]
dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group )
# scatter logic
__snake_case : Optional[int] = question_hidden_states.shape[0]
__snake_case : Optional[Any] = []
__snake_case : Any = []
if self._is_main():
assert len(__magic_name__ ) == world_size
__snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ )
__snake_case , __snake_case : Tuple = torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa )
__snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
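
# --- Illustrative sketch of the scatter half of the gather -> retrieve ->
# scatter pattern used by `retrieve` above. It assumes an already-initialized
# process group (the "gloo" group created in init_retrieval); the helper name
# is my own.
def scatter_from_main_sketch(shards, shape, dtype, group):
    # Every rank allocates an output buffer of the per-rank shape; only the
    # main rank (src=0) supplies the list of shards to distribute.
    target = torch.empty(*shape, dtype=dtype)
    is_main = dist.get_rank(group=group) == 0
    dist.scatter(target, scatter_list=shards if is_main else None, src=0, group=group)
    return target
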
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
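
# --- Illustrative check: the classic Vigenère example round-trips through the
# functions above (uppercase letters only, alphabetic key).
assert encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
assert decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"
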
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class _A :
lowercase__: str
lowercase__: Optional[str] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
lowercase__: Optional[Union[str, int]] = None
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.major, self.minor, self.patch
def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return Version(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
return other
raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
try:
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = self._validate_operand(__magic_name__ )
return self.tuple < other.tuple
def __hash__( self : Any ) -> Any:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return self.version_str
def _str_to_version_tuple(version_str):
    """Parse a version string of the form "x.y.z" into a tuple of ints."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
    """Join a version tuple such as (1, 2, 3) back into the string "1.2.3"."""
    return ".".join(str(v) for v in version_tuple)
from ..utils import DummyObject, requires_backends
class _A ( metaclass=__lowercase ):
lowercase__: int = ['''speech''']
def __init__( self : Tuple , *__magic_name__ : List[Any] , **__magic_name__ : Dict ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
class _A ( metaclass=__lowercase ):
lowercase__: List[str] = ['''speech''']
def __init__( self : Optional[Any] , *__magic_name__ : Optional[Any] , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string (e.g. "1111") to its octal representation ("17")."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each 3-bit group maps to exactly one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
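
# --- Illustrative checks (using the `bin_to_octal` name chosen in the
# reconstruction above), cross-validated against Python's built-in oct():
assert bin_to_octal("1111") == "17"  # 0b1111 == 15 == 0o17
assert bin_to_octal("101010") == oct(int("101010", 2))[2:]  # "52"
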
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
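
# --- Worked note: only the last ten digits matter, so the same answer can be
# computed with modular exponentiation instead of full big-integer powers.
# Both approaches agree on the known answer to Project Euler problem 48.
MOD = 10**10
fast_total = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
assert str(fast_total).zfill(10) == "9110846700" == solution()
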
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__UpperCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__UpperCamelCase = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
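
# --- Illustrative sketch of the lazy-module pattern configured above
# (assuming transformers' real _LazyModule behaves roughly like this):
# attribute access triggers the underlying import, so importing the package is
# cheap and heavy backends load only when actually used.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute back to the submodule that provides it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value
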
'''simple docstring'''
def pigeonhole_sort(a: list) -> None:
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # join() needs strings, so convert the sorted integers first
    print("Sorted order is:", " ".join(str(num) for num in a))
if __name__ == "__main__":
main()
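
# --- Illustrative check: pigeonhole sort runs in O(n + range) time and must
# agree with Python's built-in sort on integer data (negatives included,
# thanks to the min_val offset).
import random

_data = [random.randint(-50, 50) for _ in range(100)]
_expected = sorted(_data)
pigeonhole_sort(_data)
assert _data == _expected
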
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
__snake_case : Tuple = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
__snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""]
__snake_case : Any = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
__snake_case : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _A ( unittest.TestCase ):
def __init__( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any]=13 , __magic_name__ : List[Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Dict=99 , __magic_name__ : List[str]=32 , __magic_name__ : Optional[int]=5 , __magic_name__ : Tuple=4 , __magic_name__ : Dict=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : List[str]=16 , __magic_name__ : Tuple=2 , __magic_name__ : List[str]=0.02 , __magic_name__ : List[Any]=4 , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : int = is_training
__snake_case : List[str] = use_attention_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : int = vocab_size
__snake_case : Optional[Any] = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[int] = num_choices
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[Any] = None
if self.use_attention_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : int = None
if self.use_token_type_ids:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case : Optional[Any] = config_and_inputs
__snake_case : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = True
lowercase__: int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = FlaxRoFormerModelTester(self )
@slow
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ )
__snake_case : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__snake_case : List[Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case : Tuple = model(__magic_name__ )[0]
__snake_case : Any = 5_00_00
__snake_case : Tuple = (1, 6, vocab_size)
self.assertEqual(output.shape , __magic_name__ )
__snake_case : Tuple = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _A :
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = image_size
__snake_case : Tuple = patch_size
__snake_case : str = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = coordinate_size
__snake_case : List[Any] = shape_size
__snake_case : Tuple = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Optional[Any] = scope
__snake_case : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : List[str] = text_seq_length
__snake_case : str = (image_size // patch_size) ** 2 + 1
__snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__snake_case : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
                    # swap so the box is stored as (top-left, bottom-right) corners
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
__snake_case : Dict = tf.constant(__magic_name__ )
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__snake_case : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
__snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
__snake_case : List[str] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
__snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.num_labels
__snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
__snake_case : Tuple = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = 2
__snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs
__snake_case : List[Any] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__: Union[str, Any] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase__: Dict = False
lowercase__: int = False
lowercase__: Dict = False
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return True
def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict:
"""simple docstring"""
__snake_case : Any = copy.deepcopy(__magic_name__ )
if model_class in get_values(__magic_name__ ):
__snake_case : Union[str, Any] = {
k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__magic_name__ ):
__snake_case : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : str = TFLayoutLMvaModelTester(self )
__snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
__snake_case : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = prepared_for_class.pop("""input_ids""" )
__snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__snake_case : str = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__snake_case : Dict = -1_00
__snake_case : str = tf.convert_to_tensor(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
__snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys()
__snake_case : Optional[Any] = inspect.signature(model.call ).parameters
__snake_case : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__snake_case : Union[str, Any] = {0: """input_ids"""}
for label_key in label_keys:
__snake_case : int = signature_names.index(__magic_name__ )
__snake_case : Optional[int] = label_key
__snake_case : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__snake_case : List[str] = prepared_for_class[value]
__snake_case : str = tuple(__magic_name__ )
# Send to model
__snake_case : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Tuple = type
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values
__snake_case : Tuple = tf.constant([[1, 2]] )
__snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
# verify the logits
__snake_case : List[str] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
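# Shape sketch (assuming the layoutlmv3-base defaults of 224x224 inputs and
# 16x16 patches): 2 text tokens + (224 // 16) ** 2 + 1 = 197 visual tokens
# yields the 199-token sequence asserted above.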
__snake_case : Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
def _a ( bin_string ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
oct_string = """"""
while len(bin_string ) % 3 != 0:
bin_string = """0""" + bin_string
bin_string_in_3_list = [
bin_string[index : index + 3]
for index in range(len(bin_string ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
oct_val = 0
for index, val in enumerate(bin_group ):
oct_val += int(2 ** (2 - index) * int(val ) )
oct_string += str(oct_val )
return oct_string
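# Usage sketch (hypothetical call sites for the function `_a` above):
# _a("111") -> "7", and _a("101010") -> "52", since the padded string splits
# into the 3-bit groups "101" and "010", which are worth 5 and 2.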
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _A :
def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = patch_size
__snake_case : List[str] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = attention_type
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = scope
__snake_case : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__snake_case : str = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
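# Worked example with the defaults above (a sketch): image_size=10 and
# patch_size=2 give (10 // 2) ** 2 = 25 patches per frame, so num_frames=2
# yields a sequence length of 2 * 25 + 1 = 51 tokens.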
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__snake_case : str = self.num_labels
return config
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Any = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
# verify the logits shape
__snake_case : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__: List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: List[str] = False
lowercase__: List[Any] = False
lowercase__: Dict = False
lowercase__: int = False
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = TimesformerModelTester(self )
__snake_case : List[Any] = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__magic_name__ )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length
__snake_case : Tuple = self.model_tester.num_frames
__snake_case : str = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Dict = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Optional[int] = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
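# Concretely (a sketch from the tester defaults): batch_size=13 and
# num_frames=2 fold into a leading dimension of 26, and 25 patches per frame
# plus the CLS token give per-layer attention tensors of shape (26, 4, 26, 26).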
__snake_case : int = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Optional[int] = True
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ):
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.hidden_states
__snake_case : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__snake_case : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
__magic_name__ )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Dict = prepare_video()
__snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
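# 400 is the number of Kinetics-400 action classes that the
# facebook/timesformer-base-finetuned-k400 checkpoint predicts over.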
__snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main ( ) -> None:
"""simple docstring"""
print("""Making key files...""" )
make_key_files("""rsa""" , 1024 )
print("""Key files generation successful.""" )
def generate_key ( key_size ) -> tuple[tuple[int, int], tuple[int, int]]:
"""simple docstring"""
print("""Generating prime p...""" )
p = rabinMiller.generate_large_prime(key_size )
print("""Generating prime q...""" )
q = rabinMiller.generate_large_prime(key_size )
n = p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
public_key = (n, e)
private_key = (n, d)
return (public_key, private_key)
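# Toy walk-through (illustrative numbers only; real keys use >= 1024-bit primes):
# p = 61, q = 53 -> n = 3233 and (p - 1) * (q - 1) = 3120; choosing e = 17
# (gcd(17, 3120) == 1) gives d = 2753, since 17 * 2753 % 3120 == 1, so
# public_key = (3233, 17) and private_key = (3233, 2753).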
def make_key_files ( name , key_size ) -> None:
"""simple docstring"""
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
public_key , private_key = generate_key(key_size )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
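# A sketch of the intent (no extra behaviour added): only the cheap
# _import_structure dict is built at import time, and _LazyModule defers
# loading modeling_conditional_detr and friends until an attribute such as
# ConditionalDetrModel is first accessed.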
| 13 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Tuple = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__snake_case : Optional[Any] = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(__magic_name__ ) , torch_builtin(__magic_name__ ) ) )
self.assertFalse(torch.allclose(gelu_python(__magic_name__ ) , gelu_new(__magic_name__ ) ) )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__snake_case : Optional[Any] = get_activation("""gelu""" )
__snake_case : List[Any] = get_activation("""gelu_10""" )
__snake_case : List[str] = torch_builtin(__magic_name__ )
__snake_case : List[str] = geluaa(__magic_name__ )
__snake_case : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__magic_name__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
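# The relationship exercised above (an assumption about gelu_10, not asserted
# directly): gelu_10 behaves like gelu clipped from above at 10.0, so the two
# activations agree wherever gelu(x) < 10.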
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(KeyError ):
get_activation("""bogus""" )
with self.assertRaises(KeyError ):
get_activation(None )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
acta = get_activation("""gelu""" )
acta.a = 1
actb = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(AttributeError ):
_ = actb.a
| 354 |
'''simple docstring'''
def count_inversions_bf ( arr ) -> int:
"""simple docstring"""
num_inversions = 0
n = len(arr )
for i in range(n - 1 ):
for j in range(i + 1 , n ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
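# Small worked example: [3, 1, 2] contains the inversions (3, 1) and (3, 2),
# so this O(n^2) scan returns 2; the merge-based version below reaches the
# same count in O(n log n).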
def count_inversions_recursive ( arr ) -> Tuple:
"""simple docstring"""
if len(arr ) <= 1:
return arr, 0
mid = len(arr ) // 2
p = arr[0:mid]
q = arr[mid:]
a , inversion_p = count_inversions_recursive(p )
b , inversions_q = count_inversions_recursive(q )
c , cross_inversions = _count_cross_inversions(a , b )
num_inversions = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _count_cross_inversions ( p , q ) -> Tuple:
"""simple docstring"""
r = []
i = j = num_inversion = 0
while i < len(p ) and j < len(q ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(p ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(p ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
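# Merge trace sketch: for p = [1, 3] and q = [2], the comparison 3 > 2 adds
# len(p) - i = 1 cross inversion before q's element is consumed, yielding
# ([1, 2, 3], 1).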
def main ( ) -> None:
"""simple docstring"""
arr_a = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
num_inversions_bf = count_inversions_bf(arr_a )
_ , num_inversions_recursive = count_inversions_recursive(arr_a )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , num_inversions_bf )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
num_inversions_bf = count_inversions_bf(arr_a )
_ , num_inversions_recursive = count_inversions_recursive(arr_a )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , num_inversions_bf )
# an empty list should also have zero inversions
arr_a = []
num_inversions_bf = count_inversions_bf(arr_a )
_ , num_inversions_recursive = count_inversions_recursive(arr_a )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , num_inversions_bf )
if __name__ == "__main__":
main()
| 13 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution ( _lowerCamelCase = 20 ) -> str:
"""simple docstring"""
total = math.comb(NUM_BALLS , _lowerCamelCase )
missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _lowerCamelCase )
result = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
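# Derivation (linearity of expectation): each of the 7 colours is absent from
# a 20-ball draw with probability C(60, 20) / C(70, 20), so the expected count
# of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802.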
if __name__ == "__main__":
print(solution(20))
| 355 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 0 |