| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
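# A quick sketch of how the validation above behaves (hypothetical usage of
# the GPTNeoXConfig class restored in this file, not part of the original):
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # valid: known type, factor > 1

try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be a float > 1
except ValueError as err:
    print(err)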
| 716 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
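# The integration test above compares logits elementwise with math.isclose;
# the same tolerance check can be expressed with torch.allclose. A hedged
# sketch with illustrative values (not part of the test file):
output_slice = torch.tensor([-0.6040, -0.2517, -0.1025])
expected_slice = torch.tensor([-0.6040, -0.2517, -0.1025])
assert torch.allclose(output_slice, expected_slice, rtol=1e-4, atol=1e-4)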
| 665 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
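# Hedged sketch of the device-aware seeding pattern used in get_dummy_inputs
# above: torch.Generator(device=...) is not supported on "mps", so the global
# manual_seed generator is used there instead (illustrative helper, not part
# of the original file):
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)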
| 717 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class TPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
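# A minimal sketch of the launch pattern above: build an argv list around
# sys.executable and run the spawner as a child process. The script paths
# here are hypothetical stand-ins:
import os
import subprocess
import sys

cmd = [sys.executable, "xla_spawn.py", "--num_cores", "8", "test_script.py"]
subprocess.run(cmd, env=os.environ.copy(), check=True)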
| 665 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
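# normalize_box rescales pixel boxes into the 0-1000 coordinate space that
# LayoutLM-style models expect. A hedged example with made-up numbers:
# a box at (200, 300) with size 100x50 on a 1000x2000 image.
box = [200, 300, 300, 350]  # (left, top, right, bottom) in pixels
print(normalize_box(box, 1000, 2000))  # [200, 150, 300, 175]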
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
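# Hedged usage sketch of the processor above (assumes Pillow and pytesseract
# are installed; the image path is hypothetical):
from PIL import Image

processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, 224, 224)
print(encoding["words"], encoding["boxes"])  # OCR'd words + normalized boxes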
| 718 |
| 665 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_maskformer'] = [
        'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MaskFormerForInstanceSegmentation',
        'MaskFormerModel',
        'MaskFormerPreTrainedModel',
    ]
    _import_structure['modeling_maskformer_swin'] = [
        'MaskFormerSwinBackbone',
        'MaskFormerSwinModel',
        'MaskFormerSwinPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
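# Miniature of the lazy-import idea used above (a sketch, not the
# transformers implementation): defer importing a heavy submodule until
# one of its names is first accessed, via a module-level __getattr__ (PEP 562).
import importlib

def __getattr__(name):
    if name == "MaskFormerModel":
        module = importlib.import_module(".modeling_maskformer", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")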
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
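# Sketch of how attribute_map works: the standard config names alias the
# GPT-2-style fields, so either spelling reads the same value (illustrative
# values, not from the original file):
config = GPTBigCodeConfig(n_embd=2048, n_layer=24, n_head=16)
print(config.hidden_size)        # 2048 -- aliased to n_embd
print(config.num_hidden_layers)  # 24   -- aliased to n_layer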
| 665 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Output of a two-input NAND gate: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
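# Sketch: NAND is functionally complete, so the other gates can be built
# from it (illustrative additions, not part of the original file):
def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0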
| 720 |
class MaxFenwickTree:
    """Maintains point updates and range-maximum queries in O(log^2 n)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute this node from the maximum over its covered range
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
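# Hedged usage sketch of the structure above:
tree = MaxFenwickTree(8)
tree.update(2, 5)
tree.update(5, 9)
print(tree.query(0, 8))  # 9  (max over indices [0, 8))
print(tree.query(0, 5))  # 5  (index 5 is excluded)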
| 665 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, 'README.md')
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ), splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_3_5_1_5_6_3,
'''num_examples''': 1_0_0_0_0,
},
{
'''name''': '''validation''',
'''num_bytes''': 2_3_8_4_1_8,
'''num_examples''': 1_0_0_0,
},
], download_size=3_9_4_0_6_8_0, dataset_size=2_5_8_9_9_8_1, )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['default'], key), getattr(expected_dataset_infos['default'], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
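# Sketch of the tolerance helper above: num_bytes may drift slightly between
# runs, so the test accepts anything within 1% of the target (made-up values):
assert is_1percent_close(2_355_000, 2_351_563)      # ~0.15% off -> passes
assert not is_1percent_close(2_600_000, 2_351_563)  # >10% off -> fails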
| 721 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
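    # The loop above applies the DDIM update in reverse, deterministically encoding
    # an input spectrogram image back into latent noise (the inverse of sampling).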
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
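# Illustrative sketch of the static method above: it implements spherical linear
# interpolation (slerp). `slerp_demo` below is a hypothetical standalone copy of
# the same formula with readable names; it is not part of the pipeline class.
import torch
from math import acos, sin

def slerp_demo(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened tensors
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    # interpolate along the great-circle arc between xa and xb
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)

# slerp_demo(torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]), 0.5)
# -> tensor([0.7071, 0.7071]), the midpoint of the quarter-circle arc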
| 665 | 0 |
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =0
# if input_string is "aba" than new_input_string become "a|b|a"
SCREAMING_SNAKE_CASE__ : Optional[Any] =''''''
SCREAMING_SNAKE_CASE__ : Dict =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(UpperCamelCase__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =0, 0
# length[i] shows the length of palindromic substring with center i
SCREAMING_SNAKE_CASE__ : Tuple =[1 for i in range(len(UpperCamelCase__ ) )]
    # for each character in new_input_string, find the corresponding palindromic substring
SCREAMING_SNAKE_CASE__ : Tuple =0
for j in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ : str =1 if j > r else min(length[l + r - j] // 2, r - j + 1 )
while (
j - k >= 0
and j + k < len(UpperCamelCase__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
SCREAMING_SNAKE_CASE__ : Dict =2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
SCREAMING_SNAKE_CASE__ : Optional[int] =j - k + 1 # noqa: E741
SCREAMING_SNAKE_CASE__ : Union[str, Any] =j + k - 1
# update max_length and start position
if max_length < length[j]:
SCREAMING_SNAKE_CASE__ : Any =length[j]
SCREAMING_SNAKE_CASE__ : List[str] =j
# create that string
SCREAMING_SNAKE_CASE__ : List[Any] =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
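# Intended behaviour of `_a` above (Manacher's algorithm for the longest
# palindromic substring), e.g. "abacab" -> "bacab". The interleaved "|"
# characters let one centre-expansion pass handle both odd- and even-length
# palindromes.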
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
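# e.g. calculate_prime_numbers(10) -> [2, 3, 5, 7]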
def solution( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
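# Worked check on a small bound: solution(30) counts semiprimes below 30. The
# primes up to 15 are [2, 3, 5, 7, 11, 13] and the two-pointer scan adds
# 6 + 3 + 1 = 10 products (4, 6, 9, 10, 14, 15, 21, 22, 25, 26).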
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """audio-spectrogram-transformer"""
def __init__( self : List[Any] , __lowercase : Tuple=7_68 , __lowercase : Union[str, Any]=12 , __lowercase : int=12 , __lowercase : Union[str, Any]=30_72 , __lowercase : List[str]="gelu" , __lowercase : Union[str, Any]=0.0 , __lowercase : int=0.0 , __lowercase : Optional[int]=0.02 , __lowercase : Union[str, Any]=1e-12 , __lowercase : Dict=16 , __lowercase : Tuple=True , __lowercase : Union[str, Any]=10 , __lowercase : Any=10 , __lowercase : str=10_24 , __lowercase : Dict=1_28 , **__lowercase : Dict , ) -> int:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Any =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict =intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Any =qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] =frequency_stride
SCREAMING_SNAKE_CASE__ : int =time_stride
SCREAMING_SNAKE_CASE__ : str =max_length
SCREAMING_SNAKE_CASE__ : List[Any] =num_mel_bins
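        # With the defaults above, 16x16 patches are extracted with a stride of 10
        # along both the 128 mel bins (frequency) and the 1024 frames (time), so
        # neighbouring patches overlap by 6 bins/frames.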
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
        # in TimeSformer, the number of spatiotemporal tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
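        # e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, so seq_length = 2 * 25 + 1 = 51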
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
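            # sanity check on the expected shape: with the tester defaults seq_len = 51
            # and num_frames = 2, so seq_len // num_frames + 1 = 26 = num_patches_per_frame + 1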
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def prepare_video( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 665 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ = {'facebook/blenderbot_small-90M': 5_1_2}
def get_pairs( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =set()
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : List[Any] =char
SCREAMING_SNAKE_CASE__ : Any =set(UpperCamelCase__ )
return pairs
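# e.g. get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}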
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Optional[int]="__start__" , __lowercase : Union[str, Any]="__end__" , __lowercase : int="__unk__" , __lowercase : Union[str, Any]="__null__" , **__lowercase : Optional[Any] , ) -> int:
super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict =merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ : Tuple =[tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE__ : Dict =dict(zip(__lowercase , range(len(__lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : str ={}
@property
def __magic_name__ ( self : Union[str, Any] ) -> int:
return len(self.encoder )
def __magic_name__ ( self : List[str] ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> str:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : Dict =re.sub('''([.,!?()])''' , r''' \1''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =re.sub('''(\')''' , r''' \1 ''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =re.sub(r'''\s{2,}''' , ''' ''' , __lowercase )
if "\n" in token:
SCREAMING_SNAKE_CASE__ : Any =token.replace('''\n''' , ''' __newln__''' )
SCREAMING_SNAKE_CASE__ : Dict =token.split(''' ''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
for token in tokens:
if not len(__lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Optional[int] =token.lower()
SCREAMING_SNAKE_CASE__ : Optional[int] =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ : int =get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
SCREAMING_SNAKE_CASE__ : str =min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ : Optional[int] =bigram
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : str =0
while i < len(__lowercase ):
try:
SCREAMING_SNAKE_CASE__ : List[Any] =word.index(__lowercase , __lowercase )
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : List[str] =j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : Any =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_word
if len(__lowercase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Dict =get_pairs(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict ='''@@ '''.join(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[:-4]
SCREAMING_SNAKE_CASE__ : Any =word
words.append(__lowercase )
return " ".join(__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : str =re.findall(r'''\S+\n?''' , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> int:
SCREAMING_SNAKE_CASE__ : Any =token.lower()
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : List[str] , __lowercase : int ) -> str:
return self.decoder.get(__lowercase , self.unk_token )
def __magic_name__ ( self : str , __lowercase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : int =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
SCREAMING_SNAKE_CASE__ : Dict =0
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : __lowercase[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =token_index
writer.write(''' '''.join(__lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
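# Minimal usage sketch (hypothetical local file names; the URL map above ties
# this tokenizer to facebook/blenderbot_small-90M):
# tokenizer = __SCREAMING_SNAKE_CASE("vocab.json", "merges.txt")
# ids = tokenizer("sam spends most of his time grooming himself")["input_ids"]
# tokenizer.save_vocabulary("out_dir")  # rewrites vocab.json and merges.txt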
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
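    # The constructor re-synchronizes the serialized normalizer with the requested
    # lowercasing, accent-stripping, and Chinese-character options.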
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token='''[UNK]''' , sep_token='''[SEP]''' , pad_token='''[PAD]''' , cls_token='''[CLS]''' , mask_token='''[MASK]''' , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 665 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
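# Configuration for Data2Vec vision models: a BEiT-style ViT backbone plus optional
# semantic-segmentation decode/auxiliary heads.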
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
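    # ONNX export spec: dynamic axes for the `pixel_values` input plus the numeric
    # tolerance used when validating the exported graph.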
@property
def __magic_name__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__ ( self : List[str] ) -> float:
return 1e-4
| 704 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory( args: Namespace ):
    """Factory used by the `train` subcommand to build a TrainCommand from parsed arguments."""
    return TrainCommand(args )


class TrainCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''' )
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''' )
        train_parser.add_argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''' )
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''' )
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to saved the trained model.''' )
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''' )
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
        train_parser.add_argument('''--train_batch_size''' , type=int , default=32 , help='''Batch size for training.''' )
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=64 , help='''Batch size for validation.''' )
        train_parser.add_argument('''--learning_rate''' , type=float , default=3e-5 , help='''Learning rate.''' )
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args: Namespace ):
        self.logger = logging.get_logger('''transformers-cli/training''' )
        self.framework = '''tf''' if is_tf_available() else '''torch'''

        os.makedirs(args.output , exist_ok=True )
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(F"Loading dataset from {args.train_data}" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F"Loading validation dataset from {args.validation_data}" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
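    # Dispatch to the framework-specific training loop chosen at construction time.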
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch( self ):
        raise NotImplementedError

    def run_tf( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 665 | 0 |
'''simple docstring'''
def check_bipartite_dfs( graph: dict ):
    """Return True if `graph` (an adjacency list keyed 0..n-1) is bipartite."""
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs(v: int, c: int ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i, 0 )

    # A proper 2-coloring exists iff no edge joins two vertices of the same color.
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
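# A quick negative check (same adjacency-list format): a triangle contains an odd
# cycle, so it is not bipartite.
# print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False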
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
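# Full determinism (seeded kernels) so the hard-coded output slices below are reproducible.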
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
'''simple docstring'''
def interpolation_search( sorted_collection: list, item: int ):
    """Search `item` in an ascending sorted collection; return its index or None."""
    left = 0
    right = len(sorted_collection ) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Probe position from linear interpolation between the boundary values.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion( sorted_collection: list, item: int, left: int, right: int ):
    """Recursive variant; `left` and `right` bound the slice still under inspection."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right )


def __assert_sorted( collection: list ):
    """Raise ValueError if `collection` is not in ascending order."""
    if collection != sorted(collection ):
        raise ValueError('''Collection must be ascending sorted''' )
    return True
if __name__ == "__main__":
    import sys

    # Define the collection unconditionally so the search below never hits a NameError.
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 6_7
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
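# Worked example: interpolation_search([1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3], 4_5)
# first probes index 2 (value 4_0), narrows the left bound to 3, then probes index 3
# and returns 3.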
| 706 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache( Generic[T] ):
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key from the right end of the deque.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        # The most recently used keys live at the left end.
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ) -> None:
        for k in self.dq_store:
            print(k )

    def __repr__( self ) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
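# Design note: `refer` is O(n) on a cache hit because `deque.remove` scans the queue;
# an OrderedDict-based variant (move_to_end on access, popitem(last=False) to evict)
# makes both hits and evictions O(1).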
| 665 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two feature vectors."""
    return np.linalg.norm(np.array(a ) - np.array(b ) )


def classifier(x_train, y_train, classes, point, k=5 ):
    """Classify `point` by majority vote among its k nearest training samples."""
    data = zip(x_train, y_train )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
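# Roughly equivalent scikit-learn call (sketch, not used above):
# KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train).predict([[4.4, 3.1, 1.3, 1.4]])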
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [matrix | vector].
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 1_0)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through (1, y1), (2, y2), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**1_0
    )


def solution(func: Callable[[int], int] = question_function, order: int = 1_0) -> int:
    """Sum the first incorrect terms produced by the optimum polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
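# Project Euler 101: for each k, fit the optimum polynomial OP(k, n) through the first
# k terms of u(n) and add its first incorrect term (FIT) to the running total.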
| 665 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path ):
    """Load a metaseq checkpoint and normalize its state dict to the HF layout."""
    sd = torch.load(checkpoint_path, map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']

    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )

    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )

    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''' )

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the QKV weight split in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0 )

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None ):
    """Convert a metaseq OPT checkpoint into a saved Hugging Face OPTModel."""
    state_dict = load_checkpoint(checkpoint_path )

    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()

    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )

    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
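# Example invocation (hypothetical paths):
#   python convert_opt_checkpoint.py --fairseq_path ./checkpoint.pt --pytorch_dump_folder_path ./opt-hf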
| 708 |
'''simple docstring'''
def interpolation_search( sorted_collection: list, item: int ):
    """Search `item` in an ascending sorted collection; return its index or None."""
    left = 0
    right = len(sorted_collection ) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Probe position from linear interpolation between the boundary values.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion( sorted_collection: list, item: int, left: int, right: int ):
    """Recursive variant; `left` and `right` bound the slice still under inspection."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right )


def __assert_sorted( collection: list ):
    """Raise ValueError if `collection` is not in ascending order."""
    if collection != sorted(collection ):
        raise ValueError('''Collection must be ascending sorted''' )
    return True
if __name__ == "__main__":
    import sys

    # Define the collection unconditionally so the search below never hits a NameError.
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 6_7
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
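# Standard Hugging Face lazy-import pattern: submodules listed in `_import_structure`
# are only imported on first attribute access, keeping the top-level import cheap.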
| 709 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
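    # The tiny random CLIP checkpoint yields near-uniform scores (~0.333 each), so the
    # fast tests below assert output structure rather than a meaningful ranking.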
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel( ModelMixin , ConfigMixin ):
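    # unCLIP-style text projection: CLIP image embeddings are folded into both the
    # timestep embedding and a handful of extra cross-attention context tokens.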
@register_to_config
    def __init__( self , *,
        clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 7_68 , time_embed_dim: int , cross_attention_dim , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 710 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
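    # The same Ozymandias lyrics are tokenized below with both the 1b and 5b
    # vocabularies; the 5b outputs include -1 sentinel entries in the metadata tokens.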
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 665 | 0 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ : int =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE__ : int =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Tuple =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : List[Any] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : int =VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : str =Text('''CPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE__ : Dict =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Text('''GPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Dict =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Text('''Model''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =[]
SCREAMING_SNAKE_CASE__ : Tuple =[]
for i, rect in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =fill.copy().set_fill(__lowercase , opacity=0.8 )
target.move_to(__lowercase )
model_arr.append(__lowercase )
SCREAMING_SNAKE_CASE__ : str =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
SCREAMING_SNAKE_CASE__ : int =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : List[Any] =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[Any] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : List[str] =VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : int =Text('''Disk''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : str =MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Square(0.3 )
input.set_fill(__lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowercase , buff=0.5 )
self.play(Write(__lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowercase , buff=0.02 )
self.play(MoveToTarget(__lowercase ) )
self.play(FadeOut(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Arrow(start=__lowercase , end=__lowercase , color=__lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE__ : Any =MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
SCREAMING_SNAKE_CASE__ : Tuple ={'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__lowercase ) , Circumscribe(model_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_cpu_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AnimationGroup(
FadeOut(__lowercase , run_time=0.5 ) , MoveToTarget(__lowercase , run_time=0.5 ) , FadeIn(__lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE__ : Optional[int] =0.7
self.play(
Circumscribe(model_arr[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_arr[i + 1] , color=__lowercase , **__lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowercase , **__lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE__ : List[Any] =a_c
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowercase ) , FadeOut(__lowercase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE__ : str =MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) , MoveToTarget(__lowercase ) )
self.wait()
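# Rendering note (added for illustration): a manim Scene like the one above is
# typically rendered from the command line, e.g. `manim -pql this_file.py <SceneClassName>`
# (Manim Community flags: -p preview, -ql low quality for fast iteration). The
# scene class name in this snippet is obfuscated, so substitute the real one.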
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def _a( UpperCamelCase__ : float, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =u
for i in range(1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =temp * (u - i)
return temp
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =int(input('''enter the number of values: ''' ) )
SCREAMING_SNAKE_CASE__ : list[list[float]] =[]
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =0
print('''enter the values of parameters in a list: ''' )
SCREAMING_SNAKE_CASE__ : int =list(map(UpperCamelCase__, input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =float(input() )
SCREAMING_SNAKE_CASE__ : int =int(input('''enter the value to interpolate: ''' ) )
SCREAMING_SNAKE_CASE__ : Any =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, UpperCamelCase__ ):
for j in range(n - i ):
SCREAMING_SNAKE_CASE__ : List[str] =y[j + 1][i - 1] - y[j][i - 1]
SCREAMING_SNAKE_CASE__ : Tuple =y[0][0]
for i in range(1, UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__, UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
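# Illustrative, self-contained example (added): Newton's forward-difference
# interpolation on a small hypothetical table. The helper product
# u*(u-1)*...*(u-i+1) is inlined because function names in this snippet are
# obfuscated.
def _newton_forward_example(value: float = 1.5) -> float:
    x = [0.0, 1.0, 2.0, 3.0]
    y = [1.0, 2.0, 5.0, 10.0]  # samples of y = x**2 + 1
    n = len(x)
    # forward-difference table: row i holds the i-th order differences
    table = [y[:]]
    for i in range(1, n):
        table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        term = 1.0
        for k in range(i):  # u * (u - 1) * ... * (u - i + 1)
            term *= u - k
        summ += term * table[i][0] / math.factorial(i)
    return summ  # 3.25, matching 1.5**2 + 1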
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
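# Usage note (added): with `_LazyModule`, the heavy torch/TF submodules listed
# above are imported only on first attribute access. A minimal sketch (module
# path is illustrative):
#
#   import transformers.models.deberta as deberta  # cheap, no torch import yet
#   model_cls = deberta.DebertaModel               # first access triggers the lazy import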
| 665 | 0 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a_ = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is a hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
require_version(deps[pkg], UpperCamelCase__ )
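# Illustrative usage (added): the helper above (named `dep_version_check`
# upstream) looks up the pinned requirement string in `deps` and delegates to
# `require_version`, e.g.:
#
#   require_version_core(deps["tqdm"])                       # checks e.g. "tqdm>=4.27"
#   require_version("numpy>=1.17", "pip install -U numpy")   # explicit requirement + hint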
| 713 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
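# Illustrative, self-contained example (added): one compact Jacobi solve on a
# strictly diagonally dominant 2x2 system (4x + y = 1, x + 3y = 2). The update
# rule is re-implemented inline because function names in this snippet are
# obfuscated.
def _jacobi_example(iterations: int = 25) -> list[float]:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    x = np.zeros(2)
    for _ in range(iterations):
        # x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii
        x = (b - (a @ x - np.diag(a) * x)) / np.diag(a)
    return [float(v) for v in x]  # converges toward [1/11, 7/11] ~ [0.0909, 0.6364]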
| 665 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
a_ = get_tests_dir('fixtures/vocab.json')
a_ = get_tests_dir('fixtures')
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : str ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , __lowercase ) )
copyfile(__lowercase , os.path.join(__lowercase , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Any ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE__ : List[Any] =WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowercase , __lowercase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : str =json.load(__lowercase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write(json.dumps(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : int =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE__ : int =WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowercase , __lowercase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] =json.load(__lowercase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write(json.dumps(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(__lowercase )
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE__ : Dict =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase , use_fast=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __magic_name__ ( self : str ) -> Union[str, Any]:
try:
AutoConfig.register('''custom''' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoProcessor.register(__lowercase , __lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : Any =CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Any =os.path.join(__lowercase , '''vocab.txt''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =CustomTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : str ) -> Dict:
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = False
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = False
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """AutoFeatureExtractor"""
snake_case_ = """AutoTokenizer"""
snake_case_ = False
try:
AutoConfig.register('''custom''' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __magic_name__ ( cls : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def __magic_name__ ( cls : List[str] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , '''test-processor''' ) , push_to_hub=__lowercase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int =WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , '''test-processor-org''' ) , push_to_hub=__lowercase , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any =CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(__lowercase , '''vocab.txt''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str =CustomTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : int =CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
SCREAMING_SNAKE_CASE__ : List[str] =Repository(__lowercase , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(__lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =json.load(__lowercase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=__lowercase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
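# Minimal sketch (added): the registration pattern exercised by the tests above,
# assuming the same Auto* classes from `transformers` and user-defined config,
# feature extractor, tokenizer and processor classes:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained("path/to/saved/custom-processor")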
| 714 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
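# Illustrative sketch (added; the record is hypothetical): one JSON-lines entry
# of the original entity vocabulary such as
#
#   {"id": 5, "entities": [["Japan", "en"], ["Japon", "fr"]]}
#
# is remapped by the function above to {"en:Japan": 5, "fr:Japon": 5}, while
# special tokens like "[MASK]" keep their bare names.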
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __SCREAMING_SNAKE_CASE :
@property
def __magic_name__ ( self : str ) -> Tuple:
return self.get_dummy_input()
@property
def __magic_name__ ( self : int ) -> List[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def __magic_name__ ( self : str , __lowercase : Any=True , __lowercase : Any=False , __lowercase : List[Any]=False , __lowercase : Dict=False , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =4
SCREAMING_SNAKE_CASE__ : Tuple =32
SCREAMING_SNAKE_CASE__ : Optional[int] =(32, 32)
SCREAMING_SNAKE_CASE__ : str =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.device(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =(batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE__ : Any =randn_tensor(__lowercase , generator=__lowercase , device=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''hidden_states''': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE__ : Any =1_28
SCREAMING_SNAKE_CASE__ : List[Any] =randn_tensor((batch_size, temb_channels) , generator=__lowercase , device=__lowercase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.manual_seed(1 )
SCREAMING_SNAKE_CASE__ : Tuple =(randn_tensor(__lowercase , generator=__lowercase , device=__lowercase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE__ : Dict =floats_tensor((batch_size, 32, 32) ).to(__lowercase )
if include_skip_sample:
SCREAMING_SNAKE_CASE__ : str =randn_tensor(((batch_size, 3) + sizes) , generator=__lowercase , device=__lowercase )
return dummy_input
def __magic_name__ ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int ={
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 1_28,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE__ : str =32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dummy_input
return init_dict, inputs_dict
def __magic_name__ ( self : int , __lowercase : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict =self.block_class(**__lowercase )
unet_block.to(__lowercase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =unet_block(**__lowercase )
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] =output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE__ : int =torch.tensor(__lowercase ).to(__lowercase )
assert torch_all_close(output_slice.flatten() , __lowercase , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str =self.block_class(**__lowercase )
model.to(__lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : str =model(**__lowercase )
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple =output[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.device(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =randn_tensor(output.shape , device=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.nn.functional.mse_loss(__lowercase , __lowercase )
loss.backward()
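# Illustrative sketch (added; class and block names are hypothetical): concrete
# tests mix this tester into a unittest case and point it at a block:
#
#   class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = UpBlock2D
#       block_type = "up"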
| 715 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
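# Illustrative usage (added): the DP above answers the classic "abbreviation"
# question -- can `a` be turned into `b` by upper-casing some of its lowercase
# letters and deleting the remaining lowercase ones? The function name is
# obfuscated here, so the calls below use the conventional name `abbr`.
#
#   abbr("daBcd", "ABC")  # True:  upper-case 'a' and 'c', drop the two 'd's
#   abbr("dBcd", "ABC")   # False: there is no 'a' to produce the leading 'A'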
| 665 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if "model" in orig_key:
SCREAMING_SNAKE_CASE__ : List[Any] =orig_key.replace('''model.''', '''''' )
if "norm1" in orig_key:
SCREAMING_SNAKE_CASE__ : Tuple =orig_key.replace('''norm1''', '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
SCREAMING_SNAKE_CASE__ : Optional[Any] =orig_key.replace('''norm2''', '''output.LayerNorm''' )
if "norm" in orig_key:
SCREAMING_SNAKE_CASE__ : Dict =orig_key.replace('''norm''', '''LayerNorm''' )
if "transformer" in orig_key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =orig_key.split('''.''' )[0].split('''_''' )[-1]
SCREAMING_SNAKE_CASE__ : Dict =orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
SCREAMING_SNAKE_CASE__ : Optional[int] =orig_key.replace('''mha.attn''', '''attention.self''' )
if "mha" in orig_key:
SCREAMING_SNAKE_CASE__ : List[Any] =orig_key.replace('''mha''', '''attention''' )
if "W_q" in orig_key:
SCREAMING_SNAKE_CASE__ : Any =orig_key.replace('''W_q''', '''self.query''' )
if "W_k" in orig_key:
SCREAMING_SNAKE_CASE__ : Tuple =orig_key.replace('''W_k''', '''self.key''' )
if "W_v" in orig_key:
SCREAMING_SNAKE_CASE__ : Any =orig_key.replace('''W_v''', '''self.value''' )
if "ff1" in orig_key:
SCREAMING_SNAKE_CASE__ : Any =orig_key.replace('''ff1''', '''intermediate.dense''' )
if "ff2" in orig_key:
SCREAMING_SNAKE_CASE__ : List[Any] =orig_key.replace('''ff2''', '''output.dense''' )
if "ff" in orig_key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =orig_key.replace('''ff''', '''output.dense''' )
if "mlm_class" in orig_key:
SCREAMING_SNAKE_CASE__ : Optional[int] =orig_key.replace('''mlm.mlm_class''', '''cls.predictions.decoder''' )
if "mlm" in orig_key:
SCREAMING_SNAKE_CASE__ : int =orig_key.replace('''mlm''', '''cls.predictions.transform''' )
if "cls" not in orig_key:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''yoso.''' + orig_key
return orig_key
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : str ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : List[str] =orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =val
SCREAMING_SNAKE_CASE__ : Dict =orig_state_dict['''cls.predictions.decoder.bias''']
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''model_state_dict''']
SCREAMING_SNAKE_CASE__ : Optional[int] =YosoConfig.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =YosoForMaskedLM(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =convert_checkpoint_helper(config.max_position_embeddings, UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
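# Illustrative invocation (added; script name and paths are hypothetical):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.pt \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path /path/to/converted_model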
| 716 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _long_tensor(tok_lst):
    '''Build a long tensor on the active test device from a (nested) list of token ids.'''
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 665 | 0 |
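The slice comparison at the end of the integration test above relies on math.isclose with both a relative and an absolute tolerance. A standalone sketch of that check; the values here are made up for illustration.

import math

TOLERANCE = 1e-4
output = [0.60401, -0.25169, -0.10251]  # pretend model outputs
expected = [0.6040, -0.2517, -0.1025]   # pretend reference values
for i, (a, b) in enumerate(zip(output, expected)):
    assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), f'i={i} a={a} b={b}'
print('all values within tolerance')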
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
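The _import_structure + _LazyModule pattern above defers heavy imports until a symbol is first touched. A dependency-free sketch of the same idea using PEP 562's module-level __getattr__; the map entries here are arbitrary stand-ins.

import importlib

_import_structure = {'math': ['sqrt'], 'json': ['dumps']}

def __getattr__(name):  # PEP 562: invoked for attributes missing from this module
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(__getattr__('sqrt')(16.0))  # 4.0 — resolved and imported on first use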
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp(self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
    def test_tpu(self) -> None:
        distributed_args = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 665 | 0 |
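execute_subprocess_async above wraps the standard pattern of re-invoking the current interpreter on a launcher script. A plain subprocess sketch of the same idea:

import os
import subprocess
import sys

cmd = [sys.executable, '-c', "print('hello from a child interpreter')"]
result = subprocess.run(cmd, env=os.environ.copy(), capture_output=True, text=True, check=True)
print(result.stdout.strip())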
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    '''Sum every perimeter produced by the recurrence below that does not exceed max_perimeter.'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
| 718 |
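The recurrence in solution appears to enumerate the perimeters of "almost equilateral" triangles — sides (n, n, n±1) with integral area, as in Project Euler 94. A brute-force cross-check of its first terms, using the integer form of Heron's formula (16·area² = p(p−2a)(p−2b)(p−2c)):

import math

def almost_equilateral_perimeters(limit: int):
    for n in range(2, limit):
        for third in (n - 1, n + 1):
            p = 2 * n + third
            if p > limit:
                continue
            # 16 * area^2 = p * (p - 2a) * (p - 2b) * (p - 2c)  (Heron's formula)
            area16 = p * (p - 2 * n) * (p - 2 * n) * (p - 2 * third)
            if area16 <= 0:
                continue
            root = math.isqrt(area16)
            if root * root == area16 and root % 4 == 0:  # integral area = root / 4
                yield p

print(sorted(almost_equilateral_perimeters(200)))  # [16, 50, 196] — the recurrence's first terms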
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
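The get_dummy_inputs helpers above pin down randomness by threading an explicitly seeded torch.Generator through every sampling call. The core guarantee, in isolation:

import torch

gen_a = torch.Generator(device='cpu').manual_seed(0)
gen_b = torch.Generator(device='cpu').manual_seed(0)
assert torch.equal(torch.randn(2, 3, generator=gen_a), torch.randn(2, 3, generator=gen_b))
print('same seed, same tensor')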
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _a( UpperCamelCase__ : str, UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Tuple ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : Optional[Any] =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__, keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Dict ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Dict =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : str =(
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : Any =ParquetDatasetReader(UpperCamelCase__, features=UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a( UpperCamelCase__ : str, UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Any ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : int =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__, split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Dict, UpperCamelCase__ : List[Any] ):
'''simple docstring'''
if issubclass(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parquet_path
elif issubclass(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =[parquet_path]
SCREAMING_SNAKE_CASE__ : Optional[int] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Optional[int] =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str]=("train",) ):
'''simple docstring'''
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
for split in splits:
SCREAMING_SNAKE_CASE__ : Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : int ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=UpperCamelCase__, keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : int, UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : List[str] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Dict =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : List[Any] =(
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetReader({'''train''': parquet_path}, features=UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if split:
SCREAMING_SNAKE_CASE__ : Optional[Any] ={split: parquet_path}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''train'''
SCREAMING_SNAKE_CASE__ : str ={'''train''': parquet_path, '''test''': parquet_path}
SCREAMING_SNAKE_CASE__ : Any =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =ParquetDatasetWriter(UpperCamelCase__, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE__ : List[Any] =pq.ParquetFile(tmp_path / '''foo.parquet''' )
SCREAMING_SNAKE_CASE__ : Any =pf.read()
assert dataset.data.table == output_table
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =str(shared_datadir / '''test_image_rgb.jpg''' )
SCREAMING_SNAKE_CASE__ : List[Any] ={'''image''': [image_path]}
SCREAMING_SNAKE_CASE__ : Optional[Any] =Features({'''image''': Image()} )
SCREAMING_SNAKE_CASE__ : List[Any] =Dataset.from_dict(UpperCamelCase__, features=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetWriter(UpperCamelCase__, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE__ : List[Any] =Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple ):
'''simple docstring'''
assert get_writer_batch_size(UpperCamelCase__ ) == expected
| 719 |
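The reader/writer classes exercised above also back the public one-liners Dataset.to_parquet and Dataset.from_parquet; a minimal round-trip sketch (the file name is arbitrary):

from datasets import Dataset

ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [0, 1], 'col_3': [0.0, 1.0]})
ds.to_parquet('roundtrip.parquet')
reloaded = Dataset.from_parquet('roundtrip.parquet')
assert reloaded.column_names == ['col_1', 'col_2', 'col_3']
assert reloaded.num_rows == 2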
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 665 | 0 |
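The attribute_map above lets callers read generic names like hidden_size while the config stores GPT-style names like n_embd. A stripped-down mimic of that indirection (read side only; the real PretrainedConfig also remaps writes):

class TinyConfig:
    attribute_map = {'hidden_size': 'n_embd', 'num_hidden_layers': 'n_layer'}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):  # only called for names not found normally
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

config = TinyConfig()
print(config.hidden_size, config.num_hidden_layers)  # 768 12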
'''simple docstring'''
def merge_sort(collection: list) -> list:
    '''Return a new list with the elements of `collection` in ascending order.'''

    def merge(left: list, right: list) -> list:
        '''Merge two sorted lists into one sorted list.'''

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 720 |
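A quick property check for the sort above, assuming merge_sort from the previous block is in scope — it should agree with the builtin sorted on arbitrary integer lists:

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert merge_sort(list(data)) == sorted(data)
print('merge_sort agrees with sorted() on random inputs')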
'''simple docstring'''
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
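The two bit tricks above determine each node's coverage in the max Fenwick tree: node i aggregates the range [get_prev(i) + 1, i], and get_next(i) is the next node whose range contains i. Printing the ranges makes the layout concrete:

def get_next(index: int) -> int:
    return index | (index + 1)

def get_prev(index: int) -> int:
    return (index & (index + 1)) - 1

for i in range(8):
    print(f'node {i} covers [{get_prev(i) + 1}, {i}], next node updated after {i}: {get_next(i)}')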
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 721 |
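Several steps above grow weight matrices row-wise (word embeddings, entity embeddings, prediction biases) to make room for newly added special tokens. The underlying tensor operation, in isolation:

import torch

word_emb = torch.randn(10, 4)        # (vocab_size, hidden_size)
init_row = word_emb[3].unsqueeze(0)  # seed the new token from an existing row
word_emb = torch.cat([word_emb, init_row, init_row.clone()])  # two new tokens
print(word_emb.shape)  # torch.Size([12, 4])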
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
| 665 | 0 |
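The static slerp helper at the end of the pipeline implements spherical interpolation between two latents. The same formula in standalone NumPy, checked on the unit circle:

import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return np.sin((1 - alpha) * theta) * x0 / np.sin(theta) + np.sin(alpha * theta) * x1 / np.sin(theta)

x0, x1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
print(slerp(x0, x1, 0.5))  # ~[0.7071, 0.7071], still on the unit circle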
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : str , __A : list[str] | None = None , __A : dict[str, float] | None = None , __A : bool = False , ):
a_ : Union[str, Any] = cipher_alphabet or [chr(__A ) for i in range(97 , 1_23 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
a_ : Tuple = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
else:
# Custom frequencies dictionary
a_ : Optional[Any] = frequencies_dict
if not case_sensitive:
a_ : Tuple = ciphertext.lower()
# Chi squared statistic values
a_ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(__A ) ):
a_ : Union[str, Any] = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
a_ : Tuple = (alphabet_letters.index(letter.lower() ) - shift) % len(
__A )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
a_ : int = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
a_ : List[Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
a_ : Tuple = decrypted_with_shift.lower().count(__A )
# Get the expected amount of times the letter should appear based
# on letter frequencies
a_ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a_ : str = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
a_ : Optional[int] = decrypted_with_shift.count(__A )
# Get the expected amount of times the letter should appear based
# on letter frequencies
a_ : Dict = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a_ : List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
a_ : Any = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(__A : int ) -> tuple[float, str]:
return chi_squared_statistic_values[__A]
a_ : int = min(
__A , key=__A , )
# Get all the data from the most likely cipher (key, decoded message)
a_ , a_ : Optional[int] = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
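# Minimal usage sketch (illustrative ciphertext; the result depends on the
# text's letter statistics, so the recovered shift is a best guess):
#   shift, score, plaintext = _UpperCAmelCase("dro aesmu lbygx pyh")
# For this input the chi-squared heuristic should recover shift 10 and decode
# the message to "the quick brown fox".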
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , __SCREAMING_SNAKE_CASE : CLIPSegProcessor , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , ) -> Union[str, Any]:
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
a_ : int = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
'''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE )
a_ : int = dict(scheduler.config )
a_ : List[str] = 1
a_ : List[Any] = FrozenDict(__SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
a_ : Union[str, Any] = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE )
a_ : int = dict(scheduler.config )
a_ : List[str] = True
a_ : Optional[int] = FrozenDict(__SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=__SCREAMING_SNAKE_CASE , segmentation_processor=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a_ : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a_ : List[Any] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Dict , ) -> Dict:
a_ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
a_ : Optional[Any] = self.segmentation_model(**__SCREAMING_SNAKE_CASE )
a_ : Any = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
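# Sigmoid turns the CLIPSeg segmentation logits into a soft [0, 1] map; no hard
# threshold is applied, so the grayscale image below serves directly as the mask.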
a_ : Union[str, Any] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
a_ : str = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , )
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : __A.reverse()
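# The patched generator's shuffle reverses the partition-index list in place,
# so [0, 1, 2] deterministically becomes the expected order [2, 1, 0].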
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Any=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , ) -> List[str]:
a_ : Dict = size if size is not None else {'''height''': 18, '''width''': 18}
a_ : Optional[int] = parent
a_ : Any = batch_size
a_ : int = num_channels
a_ : Dict = image_size
a_ : Optional[int] = min_resolution
a_ : Optional[Any] = max_resolution
a_ : int = do_resize
a_ : Dict = size
a_ : str = apply_ocr
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Optional[int] = LayoutLMvaImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
a_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
a_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
# Initialize image_processing
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
# Test batched
a_ : Optional[Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
# Initialize image_processing
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
a_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
a_ : Dict = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
# Initialize image_processing
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
a_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
a_ : Union[str, Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
# with apply_OCR = True
a_ : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ : List[str] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
a_ : Optional[int] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
a_ : List[Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ : str = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
a_ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
a_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
a_ : str = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the inputs in the way they appear in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
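# BLOOM fuses the batch and head dimensions in its KV cache: keys are laid out
# as (batch * n_head, head_dim, past_seq) and values as (batch * n_head, past_seq, head_dim),
# which is why the two dummy shapes below differ in their last two axes.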
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
| 666 | 1 |
'''simple docstring'''
__lowerCAmelCase = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
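# e.g. str_eval("123") == 6, and any '0' in the window zeroes the whole product: str_eval("054") == 0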
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["pixel_values"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : str=PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None:
a_ : Union[str, Any] = do_resize
a_ : List[Any] = do_rescale
a_ : Dict = size_divisor
a_ : List[str] = resample
super().__init__(**__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = None , **__SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
a_ , a_ : List[Any] = get_image_size(__SCREAMING_SNAKE_CASE )
# Rounds the height and width down to the closest multiple of size_divisor
a_ : Any = height // size_divisor * size_divisor
a_ : List[str] = width // size_divisor * size_divisor
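# e.g. with size_divisor=32 a 500x650 input is rounded down and resized to
# 480x640, while an already-aligned 480x640 image keeps its size.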
a_ : Any = resize(__SCREAMING_SNAKE_CASE , (new_h, new_w) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return image
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> np.ndarray:
return rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[TensorType, str]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> BatchFeature:
a_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
a_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
a_ : Dict = size_divisor if size_divisor is not None else self.size_divisor
a_ : Optional[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
a_ : Dict = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
a_ : Optional[int] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for img in images]
if do_resize:
a_ : List[str] = [self.resize(__SCREAMING_SNAKE_CASE , size_divisor=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
a_ : int = [self.rescale(__SCREAMING_SNAKE_CASE , scale=1 / 255 ) for image in images]
a_ : str = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
a_ : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
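# Assumes a unimodal list (strictly rises, then falls); each call recurses on
# half the list, so the search is O(log n). e.g. peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5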
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str , __A : str ):
a_ : List[Any] = len(__A )
a_ : Optional[int] = len(__A )
a_ : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
a_ : Any = True
for i in range(__A ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
a_ : Tuple = True
if a[i].islower():
a_ : List[str] = True
return dp[n][m]
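# e.g. a="daBcd", b="ABC" returns True: uppercase the lowercase 'a' and 'c' to
# match b, then drop the remaining lowercase 'd's.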
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
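# Each merge rule joins two symbols (e.g. "\u0120 l" -> "\u0120l"); "\u0120" (Ġ) is the
# byte-level BPE marker for a leading space, as in GPT-2's tokenizer.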
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
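# Quick illustration of what the combinations above verify (the checkpoint name and
# exact offsets are assumptions for a RoBERTa-style fast tokenizer, not part of the
# test suite):
# from transformers import RobertaTokenizerFast
# tok = RobertaTokenizerFast.from_pretrained(
#     "roberta-base", add_prefix_space=True, trim_offsets=False
# )
# enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# print(enc.offset_mapping)  # [(0, 5), (5, 11)] -- the second offset keeps the space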
| 666 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
    a_ : Optional[int] = {int(k): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    # pop the value under the old key and re-insert it under the new key
    val = dct.pop(old)
    dct[new] = val
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
a_ : List[Any] = in_proj_bias[: dim]
a_ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
a_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
a_ : Optional[int] = in_proj_weight[
-dim :, :
]
a_ : int = in_proj_bias[-dim :]
# fmt: on
def _UpperCAmelCase ( __A : Dict , __A : Dict ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
a_ : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
def prepare_img():
    # standard COCO test image used across the conversion scripts
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( __A : int , __A : int , __A : Any ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
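# Example invocation (the script file name and output directory are illustrative):
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub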
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
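# Minimal usage sketch (mirrors the upstream BarthezTokenizer; requires the
# sentencepiece model from one of the checkpoints listed above):
# from transformers import BarthezTokenizer
# tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# ids = tok("Paris est la capitale de la France.")["input_ids"]
# print(tok.convert_ids_to_tokens(ids))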
| 666 | 1 |
'''simple docstring'''
from math import log2
def trailing_zeros(a: int) -> int:
    # a & -a isolates the lowest set bit; log2 of that bit is the trailing-zero count
    if a < 0:
        raise ValueError('''Input value must be a positive integer''')
    elif isinstance(a, float):
        raise TypeError('''Input value must be an \'int\' type''')
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
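    # Worked example: 40 == 0b101000, so the lowest set bit is 8 and log2(8) == 3.
    print(trailing_zeros(40))  # 3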
| 666 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            # only look for l inside a window of +/- limit around position i
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
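    # Reference values for this implementation:
    #   jaro_winkler("martha", "marhta") ~= 0.9611
    #   jaro_winkler("hello", "world")   ~= 0.4667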
| 666 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bart"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str , __SCREAMING_SNAKE_CASE : List[Any]=5_0265 , __SCREAMING_SNAKE_CASE : int=1024 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : int=4096 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=4096 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=1024 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Dict=2 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> List[Any]:
a_ : Tuple = vocab_size
a_ : Dict = max_position_embeddings
a_ : List[str] = d_model
a_ : Optional[int] = encoder_ffn_dim
a_ : Dict = encoder_layers
a_ : List[str] = encoder_attention_heads
a_ : int = decoder_ffn_dim
a_ : Dict = decoder_layers
a_ : Tuple = decoder_attention_heads
a_ : str = dropout
a_ : Any = attention_dropout
a_ : int = activation_dropout
a_ : Dict = activation_function
a_ : Tuple = init_std
a_ : Any = encoder_layerdrop
a_ : Any = decoder_layerdrop
a_ : int = classifier_dropout
a_ : Tuple = use_cache
a_ : List[str] = encoder_layers
a_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __SCREAMING_SNAKE_CASE ):
a_ : Dict = self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ : Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a_ : List[Any] = {0: '''batch'''}
a_ : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
a_ : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
a_ : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
a_ : str = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a_ , a_ : Any = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
a_ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
a_ : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
a_ : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ : str = super().outputs
else:
a_ : str = super(__SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
a_ , a_ : Union[str, Any] = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
a_ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
a_ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a_ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Generate decoder inputs
a_ : List[str] = seq_length if not self.use_past else 1
a_ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[Any] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
a_ : Optional[int] = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : List[str] = common_inputs['''input_ids'''].shape
a_ : Dict = common_inputs['''decoder_input_ids'''].shape[1]
a_ , a_ : Tuple = self.num_attention_heads
a_ : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ : Dict = decoder_seq_length + 3
a_ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a_ : List[str] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 )
a_ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a_ , a_ : List[str] = self.num_layers
a_ : int = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : int = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers
a_ : int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
a_ : Union[str, Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a_ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Optional[int] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : Optional[Any] = seqlen + 2
a_ , a_ : str = self.num_layers
a_ , a_ : Any = self.num_attention_heads
a_ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ : Dict = common_inputs['''attention_mask'''].dtype
a_ : int = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
a_ : int = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ : Tuple = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ : Optional[Any] = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
a_ : str = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
a_ : Any = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
a_ : int = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
a_ : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
a_ : Any = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
a_ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
a_ : Union[str, Any] = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
a_ : Any = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
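# Usage sketch (the classes above correspond to the upstream BartConfig and
# BartOnnxConfig; the task value is illustrative):
# from transformers import BartConfig
# from transformers.models.bart.configuration_bart import BartOnnxConfig
# onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
# print(onnx_config.inputs)  # ordered mapping of input names to dynamic axes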
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
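# Rough usage sketch (tokenization details are assumptions based on the forward pass
# above: W_supports must additionally carry "sizes", "start_token_id" and
# "end_token_id" entries):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
# W_query = tokenizer(["..."], return_tensors="pt", padding=True)
# W_supports = tokenizer(["..."], return_tensors="pt", padding=True)  # plus extra keys
# p_starts, p_ends = model(W_query, W_supports)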
| 666 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : DDIMScheduler ) -> Tuple:
super().__init__()
self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
a_ : List[str] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__SCREAMING_SNAKE_CASE , )
a_ : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a_ : Dict = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
a_ : Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a_ : Tuple = {}
if accepts_eta:
a_ : int = eta
for t in self.progress_bar(self.scheduler.timesteps ):
a_ : Union[str, Any] = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
a_ : Tuple = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute the previous noisy sample x_t -> x_t-1
a_ : str = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
# decode the image latents with the VAE
a_ : Tuple = self.vqvae.decode(__SCREAMING_SNAKE_CASE ).sample
a_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
a_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a_ : List[str] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
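# Usage sketch (the equivalent upstream pipeline is diffusers.LDMPipeline; the
# checkpoint name is illustrative):
# from diffusers import LDMPipeline
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("ldm_sample.png")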
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
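# Sketch of applying the template (dataset name is illustrative; prepare_for_task is
# the datasets API that consumes these task templates):
# from datasets import load_dataset
# ds = load_dataset("beans", split="train")
# ds = ds.prepare_for_task("image-classification")
# print(ds.features)  # {"image": Image(), "labels": ClassLabel(...)}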
| 666 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger('transformers.models.speecht5')
def _UpperCAmelCase ( __A : Dict , __A : List[Any] , __A : List[Any] ):
hf_model.apply_weight_norm()
a_ : List[Any] = checkpoint['''input_conv.weight_g''']
a_ : Optional[Any] = checkpoint['''input_conv.weight_v''']
a_ : Dict = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
a_ : Optional[int] = checkpoint[f'upsamples.{i}.1.weight_g']
a_ : int = checkpoint[f'upsamples.{i}.1.weight_v']
a_ : Union[str, Any] = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
a_ : Union[str, Any] = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
a_ : int = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
a_ : int = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
a_ : Any = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
a_ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
a_ : Any = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
a_ : Union[str, Any] = checkpoint['''output_conv.1.weight_g''']
a_ : Optional[int] = checkpoint['''output_conv.1.weight_v''']
a_ : str = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _UpperCAmelCase ( __A : Any , __A : List[Any] , __A : Tuple , __A : Optional[int]=None , __A : Union[str, Any]=None , ):
if config_path is not None:
a_ : Dict = SpeechTaHifiGanConfig.from_pretrained(__A )
else:
a_ : int = SpeechTaHifiGanConfig()
a_ : Any = SpeechTaHifiGan(__A )
a_ : Optional[Any] = torch.load(__A )
load_weights(orig_checkpoint['''model''']['''generator'''] , __A , __A )
a_ : Union[str, Any] = np.load(__A )
a_ : str = stats[0].reshape(-1 )
a_ : Optional[Any] = stats[1].reshape(-1 )
a_ : Any = torch.from_numpy(__A ).float()
a_ : str = torch.from_numpy(__A ).float()
model.save_pretrained(__A )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
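# Example invocation (file names are illustrative):
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan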
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # backtrack: undo the move before trying the next candidate square
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
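    # Example (commented out, the backtracking search can take a moment): open
    # tours exist for n >= 5, while n in (2, 3, 4) raises ValueError.
    # board = open_knight_tour(5)
    # print("\n".join(" ".join(f"{cell:2d}" for cell in row) for row in board))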
| 666 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def _UpperCAmelCase ( __A : Optional[int] , __A : Optional[int] ):
a_ : Tuple = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
a_ : List[str] = DatasetInfosDict.from_directory(__A )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def _UpperCAmelCase ( __A : List[str] , __A : DatasetInfo ):
a_ : Union[str, Any] = str(__A )
dataset_info.write_to_directory(__A )
a_ : List[Any] = DatasetInfo.from_directory(__A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__A , '''dataset_info.json''' ) )
def _UpperCAmelCase ( ):
a_ : Optional[int] = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
a_ : int = dataset_info._to_yaml_dict()
assert sorted(__A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
a_ : Dict = yaml.safe_dump(__A )
a_ : Optional[int] = yaml.safe_load(__A )
assert dataset_info_yaml_dict == reloaded
def _UpperCAmelCase ( ):
a_ : List[Any] = DatasetInfo()
a_ : Dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def _UpperCAmelCase ( __A : List[Any] , __A : DatasetInfosDict ):
a_ : Union[str, Any] = str(__A )
dataset_infos_dict.write_to_directory(__A )
a_ : Optional[int] = DatasetInfosDict.from_directory(__A )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
a_ : Any = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
a_ : Optional[int] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__A , '''README.md''' ) )
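# Standalone round-trip sketch mirroring the tests above (the directory path is
# illustrative):
# infos = DatasetInfosDict({"default": DatasetInfo(dataset_size=42)})
# infos.write_to_directory("/tmp/dset_infos_dir")
# assert DatasetInfosDict.from_directory("/tmp/dset_infos_dir")["default"].dataset_size == 42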
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
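# The supported import going forward:
# from accelerate import find_executable_batch_size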
| 666 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the encoder."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
snake_case__ = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
snake_case__ = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
snake_case__ = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
snake_case__ = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
snake_case__ = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Source language id for translation."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Target language id for translation."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "# num_beams to use for evaluation."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def _UpperCAmelCase ( __A : int , __A : Dict , __A : Tuple ):
logger.info(f'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(f' {key} = {metrics[key]}' )
save_json(__A , os.path.join(__A , f'{split}_results.json' ) )
def _UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ : int = parser.parse_args_into_dataclasses()
check_output_dir(__A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ : List[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__A , __A , __A ):
assert hasattr(__A , __A ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(__A , __A , getattr(__A , __A ) )
a_ : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__A , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__A , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__A , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__A , __A ):
a_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a_ : List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__A )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a_ : Any = SeqaSeqDataset
# Get datasets
a_ : str = (
dataset_class(
__A , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
a_ : Union[str, Any] = (
dataset_class(
__A , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a_ : Union[str, Any] = (
dataset_class(
__A , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a_ : List[Any] = (
build_compute_metrics_fn(data_args.task , __A ) if training_args.predict_with_generate else None
)
a_ : int = SeqaSeqTrainer(
model=__A , args=__A , data_args=__A , train_dataset=__A , eval_dataset=__A , data_collator=SeqaSeqDataCollator(
__A , __A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__A , tokenizer=__A , )
a_ : Tuple = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
a_ : List[str] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a_ : str = train_result.metrics
a_ : List[Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __A , training_args.output_dir )
all_metrics.update(__A )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
a_ : Any = trainer.evaluate(metric_key_prefix='''val''' )
a_ : List[Any] = data_args.n_val
a_ : List[Any] = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __A , training_args.output_dir )
all_metrics.update(__A )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
a_ : Union[str, Any] = trainer.predict(test_dataset=__A , metric_key_prefix='''test''' )
a_ : List[Any] = test_output.metrics
a_ : int = data_args.n_test
if trainer.is_world_process_zero():
a_ : Dict = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __A , training_args.output_dir )
all_metrics.update(__A )
if training_args.predict_with_generate:
a_ : List[Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )
a_ : Dict = lmap(str.strip , __A )
write_txt_file(__A , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__A , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _UpperCAmelCase ( __A : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
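# Illustrative invocation (script name, model, data dir and flags below are
# assumptions for demonstration, not taken from this file):
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir wmt_en_ro \
#       --output_dir output \
#       --do_train --do_eval --predict_with_generate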
| 666 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
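    # On a Google Scholar result page, the `gs_fl` footer of each hit holds a row
    # of links; the third anchor is conventionally the "Cited by N" link, so its
    # text carries the citation count. Position-based parsing like this can break
    # if the page markup changes.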
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = 42
snake_case__ = 42
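# Each block below guards its imports behind runtime dependency checks: when an
# optional backend is missing, dummy placeholder objects are imported instead,
# so `from ... import X` still succeeds and a helpful error is raised only when
# the object is actually used.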
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = 42
snake_case__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
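                # The old checkpoint stores query/key/value as a single concatenated
                # `in_proj_weight`/`in_proj_bias`; slicing them into thirds recovers
                # the separate q/k/v projections expected by the new model.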
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
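# Illustrative invocation (script name and paths are placeholders, not taken
# from this file):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted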
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
__lowerCAmelCase = tuple[int, int]
class SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : set[int] , __SCREAMING_SNAKE_CASE : Mapping[EdgeT, int] ) -> None:
a_ : set[int] = vertices
a_ : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
}
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : EdgeT , __SCREAMING_SNAKE_CASE : int ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
a_ : Optional[Any] = weight
def SCREAMING_SNAKE_CASE ( self : Any ) -> Graph:
a_ : Graph = Graph({min(self.vertices )} , {} )
a_ : EdgeT
a_ : int
a_ : EdgeT
a_ : int
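        # Prim's algorithm: grow the tree one edge per iteration by picking the
        # cheapest edge that crosses the cut between vertices already in the
        # subgraph and those still outside it (the XOR test below selects
        # exactly the crossing edges).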
while len(subgraph.vertices ) < len(self.vertices ):
a_ : List[Any] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
a_ : Any = edge
a_ : List[Any] = weight
subgraph.add_edge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return subgraph
def _UpperCAmelCase ( __A : str = "p107_network.txt" ):
a_ : str = os.path.abspath(os.path.dirname(__A ) )
a_ : str = os.path.join(__A , __A )
a_ : dict[EdgeT, int] = {}
a_ : list[str]
a_ : int
a_ : int
with open(__A ) as f:
a_ : str = f.read().strip().split('''\n''' )
a_ : List[Any] = [line.split(''',''' ) for line in data]
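    # The network file is a full adjacency matrix with "-" marking absent edges;
    # reading only the entries below the diagonal avoids adding each undirected
    # edge twice.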
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                a_ : Tuple = int(adjaceny_matrix[edgea][edgeb] )
a_ : Graph = Graph(set(range(len(__A ) ) ) , __A )
a_ : Graph = graph.prims_algorithm()
a_ : int = sum(graph.edges.values() )
a_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
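        # Element-wise string equality; the mean of the resulting boolean array is
        # the fraction of exact matches, scaled to a 0-100 percentage.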
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : Any = 1
a_ : Optional[Any] = 3
a_ : List[str] = (32, 32)
a_ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
return image
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
torch.manual_seed(0 )
a_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
def extract(*__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Tuple ):
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple ) -> List[str]:
a_ : Optional[int] = torch.ones([0] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
self.pixel_values.to(__SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : List[Any] = self.dummy_cond_unet
a_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
a_ : Dict = self.dummy_vae
a_ : int = self.dummy_text_encoder
a_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
a_ : Dict = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : List[str] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = '''A painting of a squirrel eating a burger'''
a_ : Any = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : List[str] = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
a_ : Union[str, Any] = output.images
a_ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Any = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0]
a_ : Any = image[0, -3:, -3:, -1]
a_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ : Union[str, Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
a_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : str = self.dummy_cond_unet
a_ : Any = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = self.dummy_vae
a_ : Any = self.dummy_text_encoder
a_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
a_ : Optional[int] = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : List[str] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Any = '''A painting of a squirrel eating a burger'''
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Optional[int] = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
a_ : str = output.images
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Optional[Any] = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0]
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ : List[str] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert isinstance(pipe.scheduler , __SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
a_ : Any = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ : Optional[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : str = self.dummy_cond_unet
a_ : Optional[Any] = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
a_ : Dict = self.dummy_vae
a_ : Optional[int] = self.dummy_text_encoder
a_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
a_ : int = unet.half()
a_ : Optional[int] = vae.half()
a_ : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
a_ : Union[str, Any] = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Union[str, Any] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : List[str] = '''A painting of a squirrel eating a burger'''
a_ : Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Dict = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE )
a_ : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : str = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
a_ : Tuple = 40_0366_0346
a_ : Union[str, Any] = 7
# without safety guidance (sld_guidance_scale = 0)
a_ : str = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ : str = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
a_ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : Union[str, Any] = output.images
a_ : Optional[int] = image[0, -3:, -3:, -1]
a_ : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE )
a_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : List[str] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = '''padme amidala taking a bath artwork, safe for work, no nudity'''
a_ : Optional[Any] = 27_3497_1755
a_ : Optional[Any] = 7
a_ : Union[str, Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ : Dict = output.images
a_ : str = image[0, -3:, -3:, -1]
a_ : Dict = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : List[str] = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : Optional[int] = output.images
a_ : int = image[0, -3:, -3:, -1]
a_ : List[str] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
a_ : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
a_ : Optional[int] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Any = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
a_ : List[Any] = 10_4435_5234
a_ : List[Any] = 12
a_ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Tuple = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ : Optional[Any] = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
a_ : Tuple = torch.manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Tuple = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : List[Any] = output.images
a_ : str = image[0, -3:, -3:, -1]
a_ : Dict = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
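# `enable_full_determinism` pins the global RNG seeds and forces deterministic
# backend settings so the pixel-exact assertions below are reproducible.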
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
    a_ : Optional[int] = {int(k ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _UpperCAmelCase ( __A : str , __A : int , __A : Tuple ):
a_ : str = dct.pop(__A )
a_ : Dict = val
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
            a_ : List[Any] = in_proj_bias[:dim]
            a_ : Optional[Any] = in_proj_weight[dim : dim * 2, :]
            a_ : Union[str, Any] = in_proj_bias[dim : dim * 2]
            a_ : Optional[int] = in_proj_weight[-dim:, :]
            a_ : int = in_proj_bias[-dim:]
# fmt: on
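# Same splitting for the transformer decoder: each self-attention layer stores a fused
# in_proj matrix + bias covering query, key and value, each of width `hidden_size`.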
def read_in_decoder_q_k_v ( state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
        a_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
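# Download a standard COCO validation image (two cats on a couch) to sanity-check the conversion.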
def prepare_img ( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'Model name {model_name} not supported' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # print the original state dict for inspection
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            a_ : int = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            a_ : Optional[Any] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
# load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
    def prepare_image_processor_dict( self ):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PixaStructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image , return_tensors='''pt''' , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# Initialize image_processor
a_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
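        # a flattened patch stores height * width * channels pixel values plus 2 extra
        # positions for the patch's row and column index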
        expected_hidden_dim = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : List[Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# Initialize image_processor
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a_ : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : str = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
a_ : str = '''Hello'''
a_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : str = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
# Initialize image_processor
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Union[str, Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# Initialize image_processor
a_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
a_ : Any = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Tuple = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processor
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Dict = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
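    # A tiny two-block 32x32 UNet keeps these fast tests cheap while still exercising the full pipeline.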
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def peak ( lst : list[int] ) -> int:
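    """
    Return a peak value of lst, assuming lst increases and then decreases.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6])
    10
    """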
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
    def __post_init__( self ) -> None:
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
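    # Flatten one example's translation dict into two parallel, language-sorted lists,
    # repeating the language code whenever a language carries several translations.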
    def encode_example( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 666 | 1 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : Optional[Any] = 0
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
a_ : Any = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
a_ : Optional[int] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
a_ : Optional[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
a_ : int = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
a_ : Optional[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''' ) )
a_ : Optional[int] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , tokenizer_type='''bert''' , use_fast=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__SCREAMING_SNAKE_CASE , '''merges.txt''' ) )
a_ : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , tokenizer_type='''gpt2''' , use_fast=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''' ) )
a_ : Union[str, Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , tokenizer_type='''bert''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__SCREAMING_SNAKE_CASE , '''merges.txt''' ) )
a_ : int = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , tokenizer_type='''gpt2''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
a_ : List[str] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case , __SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
a_ : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__SCREAMING_SNAKE_CASE )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __SCREAMING_SNAKE_CASE )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
        text = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(text )
        self.assertEqual('''[UNK]''' , tokens[0] )
        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
        tokens = tokenizer.tokenize(text )
        self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
a_ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : Optional[int] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
a_ : Dict = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# Check we can load the tokenizer config of an online model.
a_ : int = get_tokenizer_config('''bert-base-cased''' )
a_ : Union[str, Any] = config.pop('''_commit_hash''' , __SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__SCREAMING_SNAKE_CASE , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
a_ : Dict = get_tokenizer_config(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
a_ : Optional[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = get_tokenizer_config(__SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = CustomTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , fast_tokenizer_class=__SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE , fast_tokenizer_class=__SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , fast_tokenizer_class=__SCREAMING_SNAKE_CASE )
        # We pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer
        # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : Optional[int] = BertTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = CustomTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : str = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
a_ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        class NewTokenizer ( BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast ( BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE , fast_tokenizer_class=__SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
a_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
a_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
a_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
a_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
a_ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
a_ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
a_ : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
a_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
a_ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a_ : Optional[int] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
# Make sure we have cached the tokenizer.
a_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
a_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest ( TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer( self ) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset
    def get_dummy_canonical_hf_index_retriever( self ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def get_dummy_custom_hf_index_retriever( self , from_disk : bool ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , '''dataset''' )
            config.index_path = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
    def get_dummy_legacy_index_retriever( self ):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        passages_file_name = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        passages = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(passages , open(passages_file_name , '''wb''' ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
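    # The dummy index holds two docs with all-ones and all-twos embeddings, so a query of all
    # ones has its max inner product with doc 1 and the negated query with doc 0.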
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , n_docs )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , n_docs )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , n_docs )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , n_docs )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids , context_attention_mask , retrieved_doc_embeds = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors='''pt''' , )
        context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for the doc-token-related keys in the dictionary
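        # Illustrative recap (comment only, not an extra check): with a ctx-encoder
        # tokenizer attached, the retriever output counted above is expected to hold
        # context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids,
        # plus the two tokenized_doc_* entries asserted in the line above.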
| 666 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def _UpperCAmelCase ( __A : Optional[Any] , __A : Any ):
a_ : List[str] = int(__A )
assert noofclusters < len(__A )
# Find out the dimensionality
a_ : Tuple = len(vectors[0] )
# Will help select random centroids from among the available vectors
a_ : List[Any] = list(range(len(__A ) ) )
shuffle(__A )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
a_ : Dict = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
a_ : Optional[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
a_ : Dict = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__A )
]
##These nodes will assign the centroid Variables the appropriate
##values
a_ : Optional[int] = tf.placeholder('''float64''' , [dim] )
a_ : str = []
for centroid in centroids:
cent_assigns.append(tf.assign(__A , __A ) )
        ##Variables for cluster assignments of individual vectors (initialized
        ##to 0 at first)
a_ : Optional[Any] = [tf.Variable(0 ) for i in range(len(__A ) )]
##These nodes will assign an assignment Variable the appropriate
##value
a_ : Union[str, Any] = tf.placeholder('''int32''' )
a_ : List[str] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__A , __A ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
a_ : Optional[Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
a_ : Union[str, Any] = tf.reduce_mean(__A , 0 )
##Node for computing Euclidean distances
# Placeholders for input
a_ : Tuple = tf.placeholder('''float''' , [dim] )
a_ : List[Any] = tf.placeholder('''float''' , [dim] )
        a_ : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(__A , __A ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
a_ : List[Any] = tf.placeholder('''float''' , [noofclusters] )
a_ : Union[str, Any] = tf.argmin(__A , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        a_ : str = tf.global_variables_initializer()
# Initialize all variables
sess.run(__A )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
a_ : Dict = 1_00
for _ in range(__A ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__A ) ):
a_ : List[Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
a_ : Dict = [
sess.run(__A , feed_dict={va: vect, va: sess.run(__A )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
a_ : Dict = sess.run(
__A , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__A ):
# Collect all the vectors assigned to this cluster
a_ : str = [
vectors[i]
for i in range(len(__A ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
a_ : List[Any] = sess.run(
__A , feed_dict={mean_input: array(__A )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
a_ : Any = sess.run(__A )
a_ : Tuple = sess.run(__A )
return centroids, assignments
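# A minimal, hypothetical usage sketch (not part of the original module): the
# sample vectors and cluster count below are illustrative assumptions, and the
# call is guarded so it only runs when the file is executed directly.
if __name__ == "__main__":
    sample_vectors = [
        array([1.0, 1.0]),
        array([1.5, 2.0]),
        array([8.0, 8.0]),
        array([9.0, 9.5]),
    ]
    # Two well-separated point clouds, so two clusters should recover them.
    sample_centroids, sample_assignments = _UpperCAmelCase(sample_vectors, 2)
    print(sample_centroids)
    print(sample_assignments)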
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
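# Illustrative worked example (comment only, not part of the original module):
# for a 3-4-5 right triangle Heron's formula above gives a semi-perimeter of
# (3 + 4 + 5) / 2 = 6 and an area of sqrt(6 * 3 * 2 * 1) = 6.0, matching the
# elementary (base * height) / 2 = (3 * 4) / 2 = 6.0.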
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "imagegpt"
snake_case__ = ["past_key_values"]
snake_case__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : str=512 + 1 , __SCREAMING_SNAKE_CASE : int=32 * 32 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : List[str]=24 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str="quick_gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : int=1e-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any:
a_ : str = vocab_size
a_ : Optional[int] = n_positions
a_ : Any = n_embd
a_ : Union[str, Any] = n_layer
a_ : List[Any] = n_head
a_ : str = n_inner
a_ : int = activation_function
a_ : Optional[int] = resid_pdrop
a_ : Dict = embd_pdrop
a_ : List[Any] = attn_pdrop
a_ : Dict = layer_norm_epsilon
a_ : List[str] = initializer_range
a_ : Optional[Any] = scale_attn_weights
a_ : str = use_cache
a_ : Dict = scale_attn_by_inverse_layer_idx
a_ : Optional[Any] = reorder_and_upcast_attn
a_ : str = tie_word_embeddings
super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : "FeatureExtractionMixin" , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : int = 32 , ) -> Mapping[str, Any]:
a_ : Tuple = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = dict(preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return inputs
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
__lowerCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__lowerCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
__lowerCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : List[Any]="binary" , __SCREAMING_SNAKE_CASE : List[str]=None ) -> List[Any]:
a_ : Optional[Any] = fa_score(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE )
return {"f1": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _UpperCAmelCase ( __A : NDArray[floataa] , __A : NDArray[floataa] , __A : list[int] , __A : int , ):
a_ , a_ : int = coefficient_matrix.shape
a_ , a_ : Optional[Any] = constant_matrix.shape
if rowsa != colsa:
a_ : Dict = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(__A )
if colsa != 1:
a_ : Tuple = f'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(__A )
if rowsa != rowsa:
a_ : List[Any] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(__A )
if len(__A ) != rowsa:
a_ : Tuple = (
'''Number of initial values must be equal to number of rows in coefficient '''
f'matrix but received {len(__A )} and {rowsa}'
)
raise ValueError(__A )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
a_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
a_ , a_ : Optional[Any] = table.shape
strictly_diagonally_dominant(__A )
# Iterates the whole matrix for given number of times
for _ in range(__A ):
a_ : List[str] = []
for row in range(__A ):
a_ : Optional[Any] = 0
for col in range(__A ):
if col == row:
a_ : Union[str, Any] = table[row][col]
elif col == cols - 1:
a_ : Union[str, Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
a_ : Tuple = (temp + val) / denom
new_val.append(__A )
a_ : str = new_val
return [float(__A ) for i in new_val]
def _UpperCAmelCase ( __A : NDArray[floataa] ):
a_ , a_ : Dict = table.shape
a_ : int = True
for i in range(0 , __A ):
a_ : Optional[Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
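    # Self-contained sketch (illustrative values, independent of the shadowed
    # names above): plain NumPy Jacobi sweeps on the strictly diagonally
    # dominant system 4x + y = 2, x + 3y = -12, whose exact solution is
    # x = 18/11, y = -50/11.
    demo_a = np.array([[4.0, 1.0], [1.0, 3.0]])
    demo_b = np.array([2.0, -12.0])
    demo_x = np.zeros(2)
    for _ in range(25):
        # One Jacobi sweep: x <- D^{-1} (b - R x) with R = A - diag(A)
        demo_x = (demo_b - (demo_a - np.diag(np.diag(demo_a))) @ demo_x) / np.diag(demo_a)
    print(demo_x)  # converges towards [1.6363..., -4.5454...]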
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
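# Illustrative shape check (hypothetical numbers, comment only): with
# hidden_size=64 and n_head=8 the dummy cache built above uses head_dim=8, so
# each layer's key tensor is (batch * 8, 8, past_len) and each value tensor is
# (batch * 8, past_len, 8), matching BLOOM's fused key/value cache layout.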
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : dict ):
a_ : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
a_ : set[int] = set()
return any(
node not in visited and depth_first_search(__A , __A , __A , __A )
for node in graph )
def _UpperCAmelCase ( __A : dict , __A : int , __A : set , __A : set ):
visited.add(__A )
rec_stk.add(__A )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__A , __A , __A , __A ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__A )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
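    # Self-contained sketch (illustrative, independent of the shadowed names
    # above): the same visited/recursion-stack idea in a few lines, applied to
    # a hypothetical directed graph whose back edge 2 -> 0 closes a cycle.
    demo_graph = {0: [1], 1: [2], 2: [0], 3: []}

    def demo_has_cycle(graph: dict) -> bool:
        visited: set = set()
        stack: set = set()

        def dfs(node) -> bool:
            visited.add(node)
            stack.add(node)
            for nxt in graph[node]:
                if nxt not in visited and dfs(nxt):
                    return True
                if nxt in stack:  # back edge found
                    return True
            stack.remove(node)
            return False

        return any(node not in visited and dfs(node) for node in graph)

    print(demo_has_cycle(demo_graph))  # expected: True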
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
import random
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE : str ) -> tuple[list[int], list[int]]:
a_ : List[Any] = [ord(__SCREAMING_SNAKE_CASE ) for i in text]
a_ : str = []
a_ : Dict = []
for i in plain:
a_ : str = random.randint(1 , 300 )
a_ : Union[str, Any] = (i + k) * k
cipher.append(__SCREAMING_SNAKE_CASE )
key.append(__SCREAMING_SNAKE_CASE )
return cipher, key
@staticmethod
def SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : list[int] ) -> str:
a_ : List[Any] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Dict = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(__SCREAMING_SNAKE_CASE ) )
return "".join(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCAmelCase , __lowerCAmelCase = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
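    # Illustrative round-trip check (hypothetical variable names, not in the
    # original demo): decrypting freshly encrypted text must reproduce the
    # plaintext exactly, since decrypt inverts (i + k) * k character by character.
    demo_cipher, demo_key = Onepad().encrypt('Hello')
    assert Onepad().decrypt(demo_cipher, demo_key) == 'Hello'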
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
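# Illustrative trace (comment only, not part of the original module): for
# [1, 2, 3, 4, 5, 4, 3, 2, 1] the midpoint index is 4, the middle window is
# [4, 5, 4], and 5 beats both neighbours, so the routine returns the peak 5
# without recursing.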
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "openai-gpt"
snake_case__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=4_0478 , __SCREAMING_SNAKE_CASE : List[str]=512 , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-5 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : int="cls_index" , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=0.1 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> int:
a_ : Optional[int] = vocab_size
a_ : str = n_positions
a_ : Tuple = n_embd
a_ : Dict = n_layer
a_ : Optional[Any] = n_head
a_ : List[str] = afn
a_ : str = resid_pdrop
a_ : Tuple = embd_pdrop
a_ : List[Any] = attn_pdrop
a_ : List[Any] = layer_norm_epsilon
a_ : Tuple = initializer_range
a_ : Optional[Any] = summary_type
a_ : List[str] = summary_use_proj
a_ : List[str] = summary_activation
a_ : List[str] = summary_first_dropout
a_ : Optional[Any] = summary_proj_to_labels
super().__init__(**__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
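# Illustrative summary of the offsets test above (comment only): with
# add_prefix_space=True and trim_offsets=True, the second "hello" in
# "hello hello" maps to character offsets (6, 11), i.e. the leading space is
# trimmed out of the reported span, while trim_offsets=False keeps it as (5, 11).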
| 666 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415},\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__lowerCAmelCase = '\nWIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.\nIt can be used to evaluate the quality of machine-generated texts.\n'
__lowerCAmelCase = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def _UpperCAmelCase ( __A : Union[str, Any] ):
def remove_articles(__A : Optional[Any] ):
a_ : List[Any] = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(__A , ''' ''' , __A )
def white_space_fix(__A : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__A : List[Any] ):
a_ : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def _UpperCAmelCase ( __A : str , __A : Optional[Any] ):
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def _UpperCAmelCase ( __A : Optional[int] , __A : List[Any] ):
a_ : str = [any(compute_exact(__A , __A ) for ref in refs ) for pred, refs in zip(__A , __A )]
return (sum(__A ) / len(__A )) * 1_00
def _UpperCAmelCase ( __A : Optional[Any] , __A : Any , __A : Any , __A : List[Any] ):
a_ : str = [rgram for rgrams in rgramslist for rgram in rgrams]
a_ : Optional[int] = Counter(__A )
a_ : Dict = Counter(__A )
a_ : Tuple = Counter()
for sgram, scount in sgramcounter.items():
a_ : Dict = scount * numref
a_ : List[str] = Counter(__A )
a_ : Dict = Counter()
for cgram, ccount in cgramcounter.items():
a_ : List[str] = ccount * numref
# KEEP
a_ : str = sgramcounter_rep & cgramcounter_rep
a_ : int = keepgramcounter_rep & rgramcounter
a_ : Tuple = sgramcounter_rep & rgramcounter
a_ : Dict = 0
a_ : str = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a_ : List[str] = 1
a_ : str = 1
if len(__A ) > 0:
a_ : List[Any] = keeptmpscorea / len(__A )
if len(__A ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
a_ : int = keeptmpscorea / sum(keepgramcounterall_rep.values() )
a_ : List[Any] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
a_ : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
a_ : Optional[int] = sgramcounter_rep - cgramcounter_rep
a_ : Dict = delgramcounter_rep - rgramcounter
a_ : Optional[Any] = sgramcounter_rep - rgramcounter
a_ : int = 0
a_ : Any = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a_ : int = 1
if len(__A ) > 0:
a_ : Tuple = deltmpscorea / len(__A )
# ADDITION
a_ : Optional[Any] = set(__A ) - set(__A )
a_ : Optional[int] = set(__A ) & set(__A )
a_ : Any = set(__A ) - set(__A )
a_ : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a_ : List[str] = 1
a_ : Optional[Any] = 1
if len(__A ) > 0:
a_ : Optional[int] = addtmpscore / len(__A )
if len(__A ) > 0:
a_ : Dict = addtmpscore / len(__A )
a_ : str = 0
if addscore_precision > 0 or addscore_recall > 0:
a_ : List[Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def _UpperCAmelCase ( __A : Optional[Any] , __A : List[str] , __A : Optional[Any] ):
a_ : Tuple = len(__A )
a_ : List[str] = ssent.split(''' ''' )
a_ : Optional[Any] = csent.split(''' ''' )
a_ : Dict = []
a_ : Dict = []
a_ : List[str] = []
a_ : Any = []
a_ : int = []
a_ : int = []
a_ : int = []
a_ : List[str] = []
a_ : int = []
a_ : str = []
for rsent in rsents:
a_ : int = rsent.split(''' ''' )
a_ : Union[str, Any] = []
a_ : Dict = []
a_ : Any = []
ragramslist.append(__A )
for i in range(0 , len(__A ) - 1 ):
if i < len(__A ) - 1:
a_ : Dict = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(__A )
if i < len(__A ) - 2:
a_ : Tuple = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(__A )
if i < len(__A ) - 3:
a_ : Any = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(__A )
ragramslist.append(__A )
ragramslist.append(__A )
ragramslist.append(__A )
for i in range(0 , len(__A ) - 1 ):
if i < len(__A ) - 1:
a_ : Dict = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(__A )
if i < len(__A ) - 2:
a_ : Optional[Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(__A )
if i < len(__A ) - 3:
a_ : Optional[int] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(__A )
for i in range(0 , len(__A ) - 1 ):
if i < len(__A ) - 1:
a_ : Optional[int] = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(__A )
if i < len(__A ) - 2:
a_ : Union[str, Any] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(__A )
if i < len(__A ) - 3:
a_ : List[Any] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(__A )
((a_) , (a_) , (a_)) : str = SARIngram(__A , __A , __A , __A )
((a_) , (a_) , (a_)) : List[str] = SARIngram(__A , __A , __A , __A )
((a_) , (a_) , (a_)) : Union[str, Any] = SARIngram(__A , __A , __A , __A )
((a_) , (a_) , (a_)) : Optional[Any] = SARIngram(__A , __A , __A , __A )
a_ : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
a_ : List[str] = sum([delascore, delascore, delascore, delascore] ) / 4
a_ : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4
a_ : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _UpperCAmelCase ( __A : Any , __A : bool = True , __A : str = "13a" , __A : bool = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
a_ : Optional[Any] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
a_ : Dict = sacrebleu.metrics.bleu._get_tokenizer(__A )()(__A )
else:
a_ : Union[str, Any] = sacrebleu.TOKENIZERS[tokenizer]()(__A )
elif tokenizer == "moses":
a_ : Dict = sacremoses.MosesTokenizer().tokenize(__A , return_str=__A , escape=__A )
elif tokenizer == "penn":
a_ : Tuple = sacremoses.MosesTokenizer().penn_tokenize(__A , return_str=__A )
else:
a_ : Any = sentence
if not return_str:
a_ : Optional[Any] = normalized_sent.split()
return normalized_sent
def _UpperCAmelCase ( __A : Dict , __A : int , __A : Optional[Any] ):
if not (len(__A ) == len(__A ) == len(__A )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
a_ : Optional[Any] = 0
for src, pred, refs in zip(__A , __A , __A ):
sari_score += SARIsent(normalize(__A ) , normalize(__A ) , [normalize(__A ) for sent in refs] )
a_ : str = sari_score / len(__A )
return 1_00 * sari_score
def _UpperCAmelCase ( __A : Dict , __A : str , __A : str="exp" , __A : int=None , __A : int=False , __A : Union[str, Any]=False , __A : str=False , ):
a_ : Optional[Any] = len(references[0] )
if any(len(__A ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
a_ : Dict = [[refs[i] for refs in references] for i in range(__A )]
a_ : List[str] = sacrebleu.corpus_bleu(
__A , __A , smooth_method=__A , smooth_value=__A , force=__A , lowercase=__A , use_effective_order=__A , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
a_ : Any = {}
result.update({'''sari''': compute_sari(sources=__SCREAMING_SNAKE_CASE , predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )} )
result.update({'''exact''': compute_em(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )} )
return result
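# A minimal, self-contained sketch (new helper name, unigrams only, single
# reference weighting) of the KEEP component that the SARIngram logic above
# computes: an F1 over n-grams the system kept from the source, judged
# against the references.
from collections import Counter

def keep_f1(source, prediction, references):
    src, pred = Counter(source.split()), Counter(prediction.split())
    ref = Counter(tok for r in references for tok in r.split())
    kept = src & pred            # n-grams the system kept from the source
    good = kept & ref            # ... that the references also keep
    precision = sum(good.values()) / sum(kept.values()) if kept else 1.0
    denom = src & ref
    recall = sum(good.values()) / sum(denom.values()) if denom else 1.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

print(keep_f1("about 95 species are accepted", "about 95 are known", ["about 95 species are known"]))  # ~0.857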
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
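# A tiny, standalone illustration (toy ids) of the RoBERTa-style pair layout
# that build_inputs_with_special_tokens above produces: <s> A </s></s> B </s>.
demo_cls, demo_sep = 0, 2
demo_a, demo_b = [10, 11], [20]
print([demo_cls] + demo_a + [demo_sep] + [demo_sep] + demo_b + [demo_sep])  # [0, 10, 11, 2, 2, 20, 2]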
| 666 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__lowerCAmelCase = 'bert-base-cased'
__lowerCAmelCase = 'google/pegasus-xsum'
__lowerCAmelCase = [' Sam ate lunch today.', 'Sams lunch ingredients.']
__lowerCAmelCase = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
__lowerCAmelCase = 'patrickvonplaten/t5-tiny-random'
__lowerCAmelCase = 'sshleifer/bart-tiny-random'
__lowerCAmelCase = 'sshleifer/tiny-mbart'
__lowerCAmelCase = 'sshleifer/tiny-marian-en-de'
def _UpperCAmelCase ( __A : Path , __A : list ):
a_ : Optional[int] = '''\n'''.join(__A )
Path(__A ).open('''w''' ).writelines(__A )
def _UpperCAmelCase ( __A : str ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__A , f'{split}.source' ) , __A )
_dump_articles(os.path.join(__A , f'{split}.target' ) , __A )
return tmp_dir
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
a_ : Any = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a_ : Tuple = max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
a_ : str = max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
a_ : List[Any] = 4
a_ : List[str] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
a_ , a_ : List[str] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
a_ : List[str] = SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path='''train''' , max_source_length=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , )
a_ : List[str] = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
a_ : List[Any] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Any:
a_ : Tuple = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a_ : Any = max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
a_ : Union[str, Any] = max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
a_ : int = 4
a_ : Optional[int] = LegacySeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path='''train''' , max_source_length=20 , max_target_length=__SCREAMING_SNAKE_CASE , )
a_ : Optional[int] = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
a_ : Optional[int] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
a_ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
a_ : int = tmp_dir.joinpath('''train.source''' ).open().readlines()
a_ : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 128 , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
a_ : List[Any] = {x.name for x in save_dir.iterdir()}
a_ : List[Any] = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 1
assert len(packed_examples[0] ) == sum(len(__SCREAMING_SNAKE_CASE ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
if not FAIRSEQ_AVAILABLE:
return
a_ , a_ , a_ : Optional[Any] = self._get_dataset(max_len=64 )
a_ : Union[str, Any] = 64
a_ : int = ds.make_dynamic_sampler(__SCREAMING_SNAKE_CASE , required_batch_size_multiple=__SCREAMING_SNAKE_CASE )
a_ : Dict = [len(__SCREAMING_SNAKE_CASE ) for x in batch_sampler]
assert len(set(__SCREAMING_SNAKE_CASE ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) # no dropped or added examples
a_ : List[str] = DataLoader(__SCREAMING_SNAKE_CASE , batch_sampler=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
a_ : Tuple = []
a_ : List[str] = []
for batch in data_loader:
a_ : Union[str, Any] = batch['''input_ids'''].shape
a_ : Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
a_ : List[str] = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(__SCREAMING_SNAKE_CASE )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__SCREAMING_SNAKE_CASE )
assert num_src_per_batch[0] == max(__SCREAMING_SNAKE_CASE )
if failures:
raise AssertionError(f'too many tokens in {len(__SCREAMING_SNAKE_CASE )} batches' )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
a_ , a_ , a_ : Tuple = self._get_dataset(max_len=512 )
a_ : Union[str, Any] = 2
a_ : List[Any] = ds.make_sortish_sampler(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
a_ : Optional[Any] = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 , sampler=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any]="input_ids" ):
return [batch[k].eq(__SCREAMING_SNAKE_CASE ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__SCREAMING_SNAKE_CASE , k='''labels''' ) ) < sum(count_pad_tokens(__SCREAMING_SNAKE_CASE , k='''labels''' ) )
assert sum(count_pad_tokens(__SCREAMING_SNAKE_CASE ) ) < sum(count_pad_tokens(__SCREAMING_SNAKE_CASE ) )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Optional[int]=1000 , __SCREAMING_SNAKE_CASE : Union[str, Any]=128 ) -> Optional[int]:
if os.getenv('''USE_REAL_DATA''' , __SCREAMING_SNAKE_CASE ):
a_ : int = '''examples/seq2seq/wmt_en_ro'''
a_ : Tuple = max_len * 2 * 64
if not Path(__SCREAMING_SNAKE_CASE ).joinpath('''train.len''' ).exists():
save_len_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
a_ : Optional[int] = '''examples/seq2seq/test_data/wmt_en_ro'''
a_ : Tuple = max_len * 4
save_len_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : int = SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path='''train''' , max_source_length=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , n_obs=__SCREAMING_SNAKE_CASE , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ , a_ , a_ : Union[str, Any] = self._get_dataset()
a_ : Optional[Any] = set(DistributedSortishSampler(__SCREAMING_SNAKE_CASE , 256 , num_replicas=2 , rank=0 , add_extra_examples=__SCREAMING_SNAKE_CASE ) )
a_ : int = set(DistributedSortishSampler(__SCREAMING_SNAKE_CASE , 256 , num_replicas=2 , rank=1 , add_extra_examples=__SCREAMING_SNAKE_CASE ) )
assert idsa.intersection(__SCREAMING_SNAKE_CASE ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
a_ : Tuple = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
if tok_name == MBART_TINY:
a_ : Union[str, Any] = SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
a_ : Optional[int] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
a_ : int = SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
a_ : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__SCREAMING_SNAKE_CASE ) == 1 if tok_name == BART_TINY else len(__SCREAMING_SNAKE_CASE ) == 0
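# A minimal sketch (toy lengths, simplified chunking) of the "sortish" idea
# the sampler tests above rely on: shuffle globally, then sort by length
# inside fixed-size chunks so batches need little padding while epoch order
# still varies with the seed.
import random

def sortish_indices(lengths, batch_size, seed=0):
    rng = random.Random(seed)
    idx = list(range(len(lengths)))
    rng.shuffle(idx)
    chunk = max(batch_size * 50, 1)
    out = []
    for i in range(0, len(idx), chunk):
        out.extend(sorted(idx[i : i + chunk], key=lambda j: -lengths[j]))
    return out

print(sortish_indices([3, 17, 5, 12, 9, 1], batch_size=2))  # [1, 3, 4, 2, 0, 5]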
| 666 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str , __A : str ):
def get_matched_characters(__A : str , __A : str ) -> str:
a_ : Union[str, Any] = []
a_ : int = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a_ : Any = int(max(0 , i - limit ) )
a_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
a_ : Any = f'{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}'
return "".join(__A )
# matching characters
a_ : Optional[Any] = get_matched_characters(__A , __A )
a_ : int = get_matched_characters(__A , __A )
a_ : Any = len(__A )
# transposition
a_ : List[Any] = (
len([(ca, ca) for ca, ca in zip(__A , __A ) if ca != ca] ) // 2
)
if not match_count:
a_ : Dict = 0.0
else:
a_ : Optional[int] = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a_ : List[str] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
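# A compact, runnable restatement (new helper name, standard max-length
# matching window rather than the min-length window used above) of the same
# metric: Jaro similarity plus Winkler's bonus of 0.1 per shared prefix
# character, capped at four characters.
def jaro_winkler_sim(s1: str, s2: str) -> float:
    if s1 == s2:
        return 1.0
    window = max(len(s1), len(s2)) // 2 - 1
    m1, m2 = [False] * len(s1), [False] * len(s2)
    matches = 0
    for i, c in enumerate(s1):
        for j in range(max(0, i - window), min(len(s2), i + window + 1)):
            if not m2[j] and s2[j] == c:
                m1[i] = m2[j] = True
                matches += 1
                break
    if matches == 0:
        return 0.0
    kept1 = [c for i, c in enumerate(s1) if m1[i]]
    kept2 = [c for j, c in enumerate(s2) if m2[j]]
    transpositions = sum(a != b for a, b in zip(kept1, kept2)) / 2
    jaro = (matches / len(s1) + matches / len(s2) + (matches - transpositions) / matches) / 3
    prefix = 0
    for a, b in zip(s1[:4], s2[:4]):
        if a != b:
            break
        prefix += 1
    return jaro + 0.1 * prefix * (1 - jaro)

print(round(jaro_winkler_sim("hello", "world"), 4))  # 0.4667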
| 666 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "mask2former"
snake_case__ = ["swin"]
snake_case__ = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Dict] = None , __SCREAMING_SNAKE_CASE : int = 256 , __SCREAMING_SNAKE_CASE : int = 256 , __SCREAMING_SNAKE_CASE : int = 256 , __SCREAMING_SNAKE_CASE : int = 1024 , __SCREAMING_SNAKE_CASE : str = "relu" , __SCREAMING_SNAKE_CASE : int = 6 , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 2048 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : int = 4 , __SCREAMING_SNAKE_CASE : int = 255 , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 2.0 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : int = 1_2544 , __SCREAMING_SNAKE_CASE : float = 3.0 , __SCREAMING_SNAKE_CASE : float = 0.75 , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : float = 1.0 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : List[int] = [4, 8, 16, 32] , __SCREAMING_SNAKE_CASE : bool = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> int:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
a_ : Union[str, Any] = CONFIG_MAPPING['''swin'''](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__SCREAMING_SNAKE_CASE , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : List[Any] = backbone_config.pop('''model_type''' )
a_ : Any = CONFIG_MAPPING[backbone_model_type]
a_ : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
a_ : Optional[int] = backbone_config
a_ : List[str] = feature_size
a_ : Optional[Any] = mask_feature_size
a_ : Dict = hidden_dim
a_ : List[str] = encoder_feedforward_dim
a_ : Dict = activation_function
a_ : Tuple = encoder_layers
a_ : int = decoder_layers
a_ : Any = num_attention_heads
a_ : Optional[int] = dropout
a_ : Optional[int] = dim_feedforward
a_ : Any = pre_norm
a_ : Optional[int] = enforce_input_projection
a_ : int = common_stride
a_ : int = ignore_value
a_ : str = num_queries
a_ : str = no_object_weight
a_ : List[str] = class_weight
a_ : Union[str, Any] = mask_weight
a_ : str = dice_weight
a_ : List[Any] = train_num_points
a_ : str = oversample_ratio
a_ : int = importance_sample_ratio
a_ : str = init_std
a_ : Dict = init_xavier_std
a_ : Optional[Any] = use_auxiliary_loss
a_ : Optional[Any] = feature_strides
a_ : Optional[Any] = output_auxiliary_logits
a_ : Dict = decoder_layers
super().__init__(**__SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
return cls(
backbone_config=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict[str, any]:
a_ : List[str] = copy.deepcopy(self.__dict__ )
a_ : Optional[Any] = self.backbone_config.to_dict()
a_ : List[str] = self.__class__.model_type
return output
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
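# A minimal sketch (toy tensors, assumed shapes) of the scoring rule the
# module above applies: a temperature-scaled softmax over pairwise cosine
# similarities between query token embeddings and support token embeddings.
import torch

demo_q = torch.randn(2, 6, 8)   # (batch, query_len, hidden)
demo_s = torch.randn(2, 10, 8)  # (batch, support_len, hidden)
demo_cos = torch.nn.CosineSimilarity(dim=3, eps=1e-08)
demo_sim = demo_cos(demo_q.unsqueeze(2), demo_s.unsqueeze(1))  # (2, 6, 10)
demo_probs = torch.softmax(1.0 * demo_sim, dim=1)  # temperature T = 1.0
print(demo_probs.shape)  # torch.Size([2, 6, 10])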
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : int , __A : int ):
    a_ : List[Any] = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
a_ : List[Any] = n - k
# Calculate C(n,k)
for i in range(__A ):
result *= n - i
result //= i + 1
return result
def _UpperCAmelCase ( __A : int ):
return binomial_coefficient(2 * node_count , __A ) // (node_count + 1)
def _UpperCAmelCase ( __A : int ):
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
a_ : Dict = 1
for i in range(1 , n + 1 ):
result *= i
return result
def _UpperCAmelCase ( __A : int ):
return catalan_number(__A ) * factorial(__A )
if __name__ == "__main__":
__lowerCAmelCase = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
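# A small cross-check (standalone helper) of the closed form used above: the
# Catalan recurrence C(0)=1, C(n)=sum(C(i)*C(n-1-i)) must agree with
# C(n) = binom(2n, n) / (n + 1).
from math import comb

def catalan_dp(n: int) -> int:
    c = [1] + [0] * n
    for k in range(1, n + 1):
        c[k] = sum(c[i] * c[k - 1 - i] for i in range(k))
    return c[n]

assert all(catalan_dp(n) == comb(2 * n, n) // (n + 1) for n in range(10))
print([catalan_dp(n) for n in range(8)])  # [1, 1, 2, 5, 14, 42, 132, 429]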
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 666 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Dict:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ):
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def _UpperCAmelCase ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@require_beam
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
a_ : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a_ : Any = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
a_ : List[Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
import apache_beam as beam
a_ : str = beam.io.parquetio.WriteToParquet
a_ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a_ : Union[str, Any] = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
a_ : Optional[Any] = partial(__SCREAMING_SNAKE_CASE , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        __SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
a_ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __SCREAMING_SNAKE_CASE )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a_ : int = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
a_ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a_ : int = NestedBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
a_ : List[str] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : tuple[int, int] , __A : int ):
a_ , a_ : List[str] = position
a_ : Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a_ : Any = []
for position in positions:
a_ , a_ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__A )
return permissible_positions
def _UpperCAmelCase ( __A : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCAmelCase ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ):
if is_complete(__A ):
return True
for position in get_valid_pos(__A , len(__A ) ):
a_ , a_ : Dict = position
if board[y][x] == 0:
a_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__A , __A , curr + 1 ):
return True
a_ : Tuple = 0
return False
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = [[0 for i in range(__A )] for j in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Optional[Any] = 1
if open_knight_tour_helper(__A , (i, j) , 1 ):
return board
a_ : Union[str, Any] = 0
    a_ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
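# Not the backtracking above but Warnsdorff's heuristic -- always move to the
# square with the fewest onward moves -- which usually finds an open tour far
# faster; a standalone sketch that may still fail from some start squares.
MOVES = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]

def warnsdorff(n: int, start=(0, 0)):
    board = [[0] * n for _ in range(n)]
    y, x = start
    board[y][x] = 1

    def onward(y, x):
        return [(y + dy, x + dx) for dy, dx in MOVES
                if 0 <= y + dy < n and 0 <= x + dx < n and board[y + dy][x + dx] == 0]

    for step in range(2, n * n + 1):
        nxt = min(onward(y, x), key=lambda p: len(onward(*p)), default=None)
        if nxt is None:
            return None  # heuristic got stuck before visiting every square
        y, x = nxt
        board[y][x] = step
    return board

for row in warnsdorff(5) or []:
    print(row)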
| 666 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
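# A minimal, numpy-free sketch (new helper name) of the normalisation pipeline
# the metric above applies before comparing: optional regex removal, optional
# lowercasing and punctuation stripping, then a plain string equality check.
import re
import string

def exact_match_rate(preds, refs, regexes_to_ignore=(), ignore_case=True, ignore_punctuation=True):
    def norm(s):
        for rgx in regexes_to_ignore:
            s = re.sub(rgx, "", s)
        if ignore_case:
            s = s.lower()
        if ignore_punctuation:
            s = s.translate(str.maketrans("", "", string.punctuation))
        return s

    hits = [norm(p) == norm(r) for p, r in zip(preds, refs)]
    return 100.0 * sum(hits) / len(hits)

print(exact_match_rate(["cat?", "theater"], ["the cat", "theater"], regexes_to_ignore=[r"the "]))  # 100.0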
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "transfo-xl"
snake_case__ = ["mems"]
snake_case__ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=26_7735 , __SCREAMING_SNAKE_CASE : List[str]=[2_0000, 4_0000, 20_0000] , __SCREAMING_SNAKE_CASE : Any=1024 , __SCREAMING_SNAKE_CASE : int=1024 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : Tuple=64 , __SCREAMING_SNAKE_CASE : Any=4096 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=18 , __SCREAMING_SNAKE_CASE : List[Any]=1600 , __SCREAMING_SNAKE_CASE : Any=1000 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[int]=-1 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]="normal" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict=0.01 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Dict=1e-5 , __SCREAMING_SNAKE_CASE : List[str]=0 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
a_ : Union[str, Any] = vocab_size
a_ : Dict = []
self.cutoffs.extend(__SCREAMING_SNAKE_CASE )
if proj_share_all_but_first:
a_ : Tuple = [False] + [True] * len(self.cutoffs )
else:
a_ : int = [False] + [False] * len(self.cutoffs )
a_ : Dict = d_model
a_ : Tuple = d_embed
a_ : Union[str, Any] = d_head
a_ : Optional[int] = d_inner
a_ : Union[str, Any] = div_val
a_ : int = pre_lnorm
a_ : List[Any] = n_layer
a_ : List[Any] = n_head
a_ : Dict = mem_len
a_ : List[Any] = same_length
a_ : Dict = attn_type
a_ : Dict = clamp_len
a_ : Union[str, Any] = sample_softmax
a_ : Dict = adaptive
a_ : Optional[Any] = dropout
a_ : str = dropatt
a_ : Optional[Any] = untie_r
a_ : str = init
a_ : Optional[Any] = init_range
a_ : int = proj_init_std
a_ : Union[str, Any] = init_std
a_ : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__lowerCAmelCase = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
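# Illustrative note (verified interactively, not part of the original file): printable ASCII
# bytes map to themselves, while whitespace/control bytes are shifted to unused code points,
# e.g. bytes_to_unicode()[ord(" ")] == "Ġ", the familiar GPT-2 marker for a leading space.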
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
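# Illustrative example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}, i.e. the set
# of adjacent symbol pairs that the bpe() loop below repeatedly ranks and merges.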
class LongformerTokenizer ( PreTrainedTokenizer ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
    def __init__(
        self ,
        vocab_file ,
        merges_file ,
        errors="replace" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        sep_token="</s>" ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        add_prefix_space=False ,
        **kwargs ,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
            elif attribute in special_keys and hasattr(old_model , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
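# Illustrative note (not part of the original file): both helpers record `num_steps` learning
# rates from a scheduler; the second additionally round-trips the scheduler through
# state_dict save/load at the halfway point, so identical outputs from the two helpers
# confirm that scheduler serialization works.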
@require_torch
class OptimizationTest ( unittest.TestCase ):
    def assertListAlmostEqual( self , list_a , list_b , tol ):
        self.assertEqual(len(list_a ) , len(list_b ) )
        for a, b in zip(list_a , list_b ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest ( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list_a , list_b , tol , msg=None ):
        self.assertEqual(len(list_a ) , len(list_b ) )
        for a, b in zip(list_a , list_b ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_a = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_a , expected_learning_rates , tol=1e-2 , msg=f'failed for {scheduler_func} in normal scheduler' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_b = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_a , lrs_b , msg=f'failed for {scheduler_func} in save and reload' )
class LambdaScheduleWrapper:
    def __init__( self , fn ):
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests ( unittest.TestCase ):
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return model
@property
    def dummy_unet_condition( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
        return model
@property
    def dummy_vqvae_and_unet( self ):
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        unet = UNet2DModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return vqvae, unet
@slow
    def test_audio_diffusion( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion( self ):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format ,
    is_archive ,
    bz2_file ,
    gz_file ,
    lz4_file ,
    seven_zip_file ,
    tar_file ,
    xz_file ,
    zip_file ,
    zstd_file ,
    text_file ,
    tmp_path ,
):
    input_paths_and_base_extractors = {
        '''7z''': (seven_zip_file, SevenZipExtractor),
        '''bz2''': (bz2_file, Bzip2Extractor),
        '''gzip''': (gz_file, GzipExtractor),
        '''lz4''': (lz4_file, Lz4Extractor),
        '''tar''': (tar_file, TarExtractor),
        '''xz''': (xz_file, XzExtractor),
        '''zip''': (zip_file, ZipExtractor),
        '''zstd''': (zstd_file, ZstdExtractor),
    }
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
    else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format ,
    is_archive ,
    bz2_file ,
    gz_file ,
    lz4_file ,
    seven_zip_file ,
    tar_file ,
    xz_file ,
    zip_file ,
    zstd_file ,
    text_file ,
    tmp_path ,
):
    input_paths = {
        '''7z''': seven_zip_file,
        '''bz2''': bz2_file,
        '''gzip''': gz_file,
        '''lz4''': lz4_file,
        '''tar''': tar_file,
        '''xz''': xz_file,
        '''zip''': zip_file,
        '''zstd''': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    Extractor.extract(input_path , extractor_format , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
    else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    import tarfile

    directory = tmp_path / '''data_dot_dot'''
    directory.mkdir()
    path = directory / '''tar_file_with_dot_dot.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.join('''..''' , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    import tarfile

    directory = tmp_path / '''data_sym_link'''
    directory.mkdir()
    path = directory / '''tar_file_with_sym_link.tar'''
    os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(str(directory / '''subdir''' ) , arcname='''subdir''' )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_extract_insecure_tar_files(
    insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog
):
    insecure_tar_files = {
        '''tar_file_with_dot_dot''': tar_file_with_dot_dot,
        '''tar_file_with_sym_link''': tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / '''extracted'''
    TarExtractor.extract(path , output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive( tmpdir ):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / '''not_a_zip_file'''
    # From: https://github.com/python/cpython/pull/5053
    data = (
        B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
        B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
        B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
        B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
    )
    with not_a_zip_file.open('''wb''' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ):
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 3_66
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
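# Illustrative example: rename_key({"a": 1}, "a", "b") turns the dict into {"b": 1},
# moving the tensor stored under the old checkpoint key to its new Transformers name.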
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'Model name {model_name} not supported' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer.decoder''' , '''model.decoder''' )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict['''model.''' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer''' , '''model''' )] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)

class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass
    def _algorithm(self):
        pass

class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')
        return self.maximum_flow

class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # the source starts at maximum height
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1

if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
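if __name__ == "__main__":
    # Minimal end-to-end sketch (added) of the pipeline exercised above; assumes
    # network access to download the same public checkpoint as the slow test.
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    pipe.set_progress_bar_config(disable=True)
    images = pipe(generator=torch.manual_seed(0), eta=0.0, num_inference_steps=50, output_type="numpy").images
    print(images.shape)  # (1, 32, 32, 3)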
| 666 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)

class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.'
            )

    @classmethod
    def pip_install(cls):
        return f'`pip install {cls.pip_package or cls.name}`'

class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)

class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)

class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)

class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)

ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}

def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'{len(available_backends)} hyperparameter search backends available. Using {name} as the default.'
            )
        return name
    raise RuntimeError(
        'No hyperparameter search backend available.\n'
        + '\n'.join(
            f' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
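if __name__ == "__main__":
    # Quick illustration (added): report which backend `Trainer.hyperparameter_search`
    # would pick on this machine, e.g. "optuna" if only optuna is installed.
    try:
        print(f"default hyperparameter search backend: {default_hp_search_backend()}")
    except RuntimeError as err:
        print(err)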
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}

@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string')),
            "translation": Sequence(Value('string')),
        }
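if __name__ == "__main__":
    # Illustration (added): multiple translations for one language are split into
    # tuples and re-sorted by language code before being stored.
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    print(encoded)
    # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}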
| 666 | 1 |
'''simple docstring'''
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}

def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
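    # Worked conversions (added illustration): 100 km/h is 100 / 3.6 m/s and
    # 100 * 0.621371192 mph, each rounded to three decimal places.
    assert convert_speed(100, "km/h", "m/s") == 27.778
    assert convert_speed(100, "km/h", "mph") == 62.137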
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
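# Real-world counterpart of the dummy retrievers above (illustrative sketch; it
# downloads an actual index, so it is left as a comment):
#
#     from transformers import RagRetriever
#
#     retriever = RagRetriever.from_pretrained(
#         "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#     )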
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq

if __name__ == "__main__":
    import doctest

    doctest.testmod()
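    # Worked examples (added): the function returns the longest non-decreasing
    # subsequence; dropping the leading 5 below keeps 1 <= 2 <= 3.
    assert longest_subsequence([5, 1, 2, 3]) == [1, 2, 3]
    assert longest_subsequence([2, 2, 2]) == [2, 2, 2]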
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2

def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))

def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2

def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2

def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)

def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)

def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)

def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius

def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width

def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2

def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2

def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area

def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height

def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height

def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2

def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y

def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2

def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))

if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print('\nSurface Areas of various geometric shapes: \n')
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 666 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _UpperCAmelCase ( __A : Dict , __A : List[Any]=False ):
a_ : List[Any] = model_type
if use_small:
key += "_small"
return os.path.join(__A , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def _UpperCAmelCase ( __A : List[str] , __A : str ):
os.makedirs(__A , exist_ok=__A )
hf_hub_download(repo_id=__A , filename=__A , local_dir=__A )
def _UpperCAmelCase ( __A : Optional[Any] , __A : str , __A : List[Any]=False , __A : str="text" ):
if model_type == "text":
a_ : Any = BarkSemanticModel
a_ : Optional[int] = BarkSemanticConfig
a_ : Tuple = BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ : Union[str, Any] = BarkCoarseModel
a_ : Union[str, Any] = BarkCoarseConfig
a_ : List[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
a_ : List[str] = BarkFineModel
a_ : Any = BarkFineConfig
a_ : Any = BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ : Optional[Any] = f'{model_type}_small' if use_small else model_type
a_ : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__A ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
a_ : Union[str, Any] = torch.load(__A , map_location=__A )
# this is a hack
a_ : List[str] = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
a_ : Optional[int] = model_args['''vocab_size''']
a_ : Optional[Any] = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ : List[str] = model_args.pop('''n_head''' )
a_ : List[str] = model_args.pop('''n_embd''' )
a_ : Any = model_args.pop('''n_layer''' )
a_ : Optional[int] = ConfigClass(**checkpoint['''model_args'''] )
a_ : List[Any] = ModelClass(config=__A )
a_ : int = GenerationConfigClass()
a_ : Tuple = model_generation_config
a_ : Tuple = checkpoint['''model''']
# fixup checkpoint
a_ : Any = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(__A ):
# replace part of the key with corresponding layer name in HF implementation
a_ : Optional[int] = k[len(__A ) :]
for old_layer_name in new_layer_name_dict:
a_ : Optional[Any] = new_k.replace(__A , new_layer_name_dict[old_layer_name] )
a_ : Union[str, Any] = state_dict.pop(__A )
a_ : Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ : Optional[int] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
a_ : int = set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ : Tuple = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(__A ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(__A ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(__A , strict=__A )
a_ : Union[str, Any] = model.num_parameters(exclude_embeddings=__A )
a_ : List[str] = checkpoint['''best_val_loss'''].item()
logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(__A , 3 )} loss' )
model.eval()
model.to(__A )
del checkpoint, state_dict
return model
def _UpperCAmelCase ( __A : List[str] , __A : Tuple=False , __A : Optional[Any]="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ : Optional[int] = '''cpu''' # do conversion on cpu
a_ : Tuple = _get_ckpt_path(__A , use_small=__A )
a_ : Any = _load_model(__A , __A , model_type=__A , use_small=__A )
# load bark initial model
a_ : int = _bark_load_model(__A , '''cpu''' , model_type=__A , use_small=__A )
if model_type == "text":
a_ : Dict = bark_model['''model''']
if model.num_parameters(exclude_embeddings=__A ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
a_ : List[str] = 5
a_ : List[Any] = 10
if model_type in ["text", "coarse"]:
a_ : Union[str, Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
a_ : Union[str, Any] = bark_model(__A )[0]
a_ : List[str] = model(__A )
# take last logits
a_ : Optional[Any] = output_new_model_total.logits[:, [-1], :]
else:
a_ : Tuple = 3
a_ : Tuple = 8
a_ : Any = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ : int = model(__A , __A )
a_ : List[Any] = bark_model(__A , __A )
a_ : Optional[int] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
def _UpperCAmelCase ( __A : Dict , __A : Optional[Any] , __A : Tuple , __A : Dict , __A : Tuple , __A : int , ):
a_ : Union[str, Any] = os.path.join(__A , __A )
a_ : Tuple = BarkSemanticConfig.from_pretrained(os.path.join(__A , '''config.json''' ) )
a_ : str = BarkCoarseConfig.from_pretrained(os.path.join(__A , '''config.json''' ) )
a_ : Dict = BarkFineConfig.from_pretrained(os.path.join(__A , '''config.json''' ) )
a_ : Union[str, Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
a_ : List[str] = BarkSemanticModel.from_pretrained(__A )
a_ : List[str] = BarkCoarseModel.from_pretrained(__A )
a_ : str = BarkFineModel.from_pretrained(__A )
a_ : Optional[int] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
a_ : Tuple = BarkConfig.from_sub_model_configs(
__A , __A , __A , __A )
a_ : Dict = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ : List[Any] = BarkModel(__A )
a_ : str = semantic
a_ : Any = coarseAcoustic
a_ : List[str] = fineAcoustic
a_ : Dict = codec
a_ : Tuple = bark_generation_config
Path(__A ).mkdir(exist_ok=__A )
bark.save_pretrained(__A , repo_id=__A , push_to_hub=__A )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
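# Once converted and combined, the checkpoint behaves like any other transformers
# model (illustrative sketch, assumes the official "suno/bark" weights):
#
#     from transformers import AutoProcessor, BarkModel
#
#     processor = AutoProcessor.from_pretrained("suno/bark")
#     model = BarkModel.from_pretrained("suno/bark")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     audio_array = model.generate(**inputs)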
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
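# A real call mirrors `get_dummy_inputs` above (illustrative sketch): the
# super-resolution stage takes the low-resolution `image` produced by the base
# IF inpainting pipeline together with `original_image` and `mask_image`, e.g.
#
#     output = pipe(
#         prompt="A painting of a squirrel eating a burger",
#         image=low_res_image,
#         original_image=original_image,
#         mask_image=mask_image,
#         num_inference_steps=50,
#     )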
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_clipseg'] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
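# Typical downstream usage of the re-exported classes (illustrative sketch):
#
#     from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat", "a remote"], images=[image] * 2, return_tensors="pt")
#     logits = model(**inputs).logits  # one low-res segmentation logit map per prompt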
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
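# Typical downstream usage (illustrative sketch, image captioning):
#
#     from transformers import AutoProcessor, GitForCausalLM
#
#     processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
#     model = GitForCausalLM.from_pretrained("microsoft/git-base-coco")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     generated_ids = model.generate(pixel_values=pixel_values, max_length=20)
#     caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]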
| 666 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass

class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
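# Concrete readers subclass these ABCs and implement `read()`; a minimal sketch
# (the real implementations, e.g. `datasets.io.csv.CsvDatasetReader`, also wire
# up caching, streaming and multiprocessing):
#
#     class CsvDatasetReader(AbstractDatasetReader):
#         def read(self):
#             builder = Csv(data_files=self.path_or_paths, features=self.features, cache_dir=self.cache_dir)
#             builder.download_and_prepare()
#             return builder.as_dataset(split=self.split, in_memory=self.keep_in_memory)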
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
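
# Rough sketch of the user-facing path these tests exercise: materializing a
# Spark DataFrame as a Hugging Face dataset. This assumes a local Spark session
# and a `datasets` release that ships `Dataset.from_spark`; left commented out
# because it needs a running Spark environment.
#
#   from datasets import Dataset
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.range(8).repartition(2)
#   ds = Dataset.from_spark(df)  # partitions become shards, as asserted above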
| 666 | 1 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
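    # A further sanity check using the textbook example: "martha"/"marhta" has
    # 6 matches, 1 transposition and a common prefix of 3, so the Jaro score of
    # ~0.9444 is lifted by Winkler's correction to roughly 0.9611.
    assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3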
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
    'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
    'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
    'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
    'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
    'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
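
# A minimal usage sketch (hypothetical sizes): build a tiny config and inspect
# the dynamic-axis spec its ONNX export config produces.
if __name__ == "__main__":
    tiny_config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=8)
    onnx_config = BloomOnnxConfig(tiny_config, task="default", use_past=False)
    print(onnx_config.inputs)  # OrderedDict of axis names for input_ids / attention_mask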
| 666 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank features from a single waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel='log',
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
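
# A minimal usage sketch (synthetic audio; shapes are illustrative): extract
# padded filterbank features for two waveforms of different lengths.
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000, padding_value=0.0)
    waveforms = [
        np.random.randn(16000).astype(np.float32),  # 1 s of audio
        np.random.randn(8000).astype(np.float32),   # 0.5 s of audio
    ]
    batch = extractor(waveforms, sampling_rate=16000, padding=True, return_attention_mask=True)
    print([f.shape for f in batch["input_features"]])  # (num_frames, 80) per example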
| 666 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        # slide the window forward while the incoming digit keeps it promising
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13

    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
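
# A brute-force cross-check sketch: evaluate every 13-digit window directly
# (O(n) windows, fine at this size). The published answer to this Project
# Euler problem is 23514624000.
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))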
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
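
# A rough end-to-end sketch of what the encoders are for (downloads pretrained
# weights, so it is left commented out; the context-encoder checkpoint name is
# the standard companion to the question encoder used above):
#
#   question_encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   ctx_encoder = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#   q = question_encoder(question_input_ids)[0]   # (1, 768) question embedding
#   c = ctx_encoder(passage_input_ids)[0]         # (1, 768) passage embedding
#   score = tf.reduce_sum(q * c, axis=-1)         # DPR ranks passages by dot product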
| 666 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak of a strictly unimodal list by bisection.
    >>> peak([1, 3, 5, 7, 6, 2])
    7
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
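    # Quick usage sketch: the input must rise then fall (strictly unimodal) for
    # the bisection to be valid; it then needs only O(log n) comparisons versus
    # O(n) for a linear scan.
    assert peak([1, 3, 5, 7, 6, 2]) == 7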
| 666 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
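
# Example invocation (hypothetical paths):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin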
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)

            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
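
# A small standalone sketch of the toy vocabulary in `setUp` (assumes the slow
# tokenizer class accepts the vocab/merges files positionally, as in
# `test_full_tokenizer`): byte-level BPE applies the merge "e r" -> "er", so
# "lower newer" splits exactly as the test asserts.
#
#   tokenizer = LongformerTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   assert tokenizer.tokenize("lower newer") == ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]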
| 666 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT uses separate source/target vocabularies
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'],
                attention_mask=inputs['attention_mask'],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])

        labels = inputs.pop('labels')
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}'
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
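
# A rough wiring sketch (hypothetical argument objects; requires the legacy
# seq2seq example utilities on the path), showing where `config` and
# `data_args` plug in:
#
#   trainer = Seq2SeqTrainer(
#       model=model,
#       args=training_args,          # training arguments with label_smoothing, lr_scheduler, ...
#       data_args=data_args,         # provides val_max_target_length / eval_beams at eval time
#       train_dataset=train_dataset,
#       data_collator=data_collator,
#   )
#   trainer.train()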
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
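# --- Added usage sketch (not part of the original file) ---
# Upstream this class is transformers' BarthezTokenizer; the vocab path below
# is a placeholder for a locally available SentencePiece model:
# tokenizer = BarthezTokenizer(vocab_file="sentencepiece.bpe.model")
# ids = tokenizer.build_inputs_with_special_tokens([100, 101])
# -> [0, 100, 101, 2], i.e. <s> ... </s> around the token ids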
| 666 | 1 |
'''simple docstring'''
__lowerCAmelCase = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowerCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowerCAmelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 666 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
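    # Added example: the classic transposition pair scores ~0.9611 under
    # Jaro-Winkler (6 matches, 1 transposition, common prefix "mar").
    print(jaro_winkler('martha', 'marhta'))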
| 666 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__lowerCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A folder containing the training data."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A folder containing the validation data."} )
snake_case__ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(SCREAMING_SNAKE_CASE_ )} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
snake_case__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name or path of preprocessor config."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples):
    # stack individual samples into one batched dict for the Trainer
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
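# Added illustration (kept as comments so importing this training script stays
# side-effect free): two fake samples collate into a single batch, e.g.
# collate_fn([{"pixel_values": torch.zeros(3, 2, 2), "labels": 0},
#             {"pixel_values": torch.ones(3, 2, 2), "labels": 1}])
# -> {"pixel_values": tensor of shape (2, 3, 2, 2), "labels": tensor([0, 1])}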
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
a_ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
a_ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
a_ : str = {}
if data_args.train_dir is not None:
a_ : Optional[Any] = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
a_ : str = os.path.join(data_args.validation_dir , '''**''' )
a_ : Dict = load_dataset(
'''imagefolder''' , data_files=__A , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
a_ : Tuple = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __A ) and data_args.train_val_split > 0.0:
a_ : Tuple = dataset['''train'''].train_test_split(data_args.train_val_split )
a_ : Tuple = split['''train''']
a_ : Any = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a_ : Union[str, Any] = dataset['''train'''].features['''labels'''].names
a_ , a_ : Tuple = {}, {}
for i, label in enumerate(__A ):
a_ : str = str(__A )
a_ : str = label
# Load the accuracy metric from the datasets package
a_ : List[Any] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
a_ : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__A ) , labelaid=__A , idalabel=__A , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ : Any = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
a_ : Union[str, Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
a_ : Tuple = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__A )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
a_ : List[str] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__A )
# Initalize our trainer
a_ : Optional[Any] = Trainer(
model=__A , args=__A , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
a_ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
a_ : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ : str = last_checkpoint
a_ : Optional[Any] = trainer.train(resume_from_checkpoint=__A )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a_ : Any = trainer.evaluate()
trainer.log_metrics('''eval''' , __A )
trainer.save_metrics('''eval''' , __A )
# Write model card and (optionally) push to hub
a_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
if __name__ == "__main__":
main()
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
| 666 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
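# Added usage sketch (upstream this is datasets' ImageClassification task
# template; the feature names below are illustrative):
# task = ImageClassification()
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task.align_with_features(features).column_mapping
# -> {"image": "image", "labels": "labels"}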
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bert-generation"
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=5_0358 , __SCREAMING_SNAKE_CASE : Optional[int]=1024 , __SCREAMING_SNAKE_CASE : List[str]=24 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : str=4096 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Any=512 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Any=1e-12 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[str]="absolute" , __SCREAMING_SNAKE_CASE : Tuple=True , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : List[str] = vocab_size
a_ : List[Any] = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Union[str, Any] = hidden_act
a_ : Dict = intermediate_size
a_ : List[str] = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : Optional[int] = initializer_range
a_ : Optional[int] = layer_norm_eps
a_ : Tuple = position_embedding_type
a_ : Dict = use_cache
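# Added usage sketch (upstream name: transformers' BertGenerationConfig; the
# hyperparameter values are arbitrary):
# config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
# config.hidden_size  # -> 512, ready to hand to an encoder/decoder model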
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try to extend a partial knight tour by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
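    # Added demo: a 5x5 board admits an open knight's tour, so this prints
    # one numbered solution row by row.
    for row in open_knight_tour(5):
        print(row)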
| 666 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "git_vision_model"
def __init__( self : int , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : int=3072 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Dict=224 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : List[Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : str=1e-5 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> str:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : Tuple = hidden_size
a_ : Dict = intermediate_size
a_ : Any = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : Dict = num_channels
a_ : Tuple = patch_size
a_ : Optional[int] = image_size
a_ : Union[str, Any] = initializer_range
a_ : Optional[int] = attention_dropout
a_ : List[str] = layer_norm_eps
a_ : int = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
a_ , a_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
a_ : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "git"
def __init__( self : str , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=3_0522 , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : str=6 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : str=3072 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1024 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : str=1e-12 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Any="absolute" , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=101 , __SCREAMING_SNAKE_CASE : Tuple=102 , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[Any]:
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if vision_config is None:
a_ : List[Any] = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
a_ : str = GitVisionConfig(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = vocab_size
a_ : Any = hidden_size
a_ : Any = num_hidden_layers
a_ : int = num_attention_heads
a_ : List[Any] = hidden_act
a_ : Dict = intermediate_size
a_ : Optional[Any] = hidden_dropout_prob
a_ : Union[str, Any] = attention_probs_dropout_prob
a_ : Dict = max_position_embeddings
a_ : Any = initializer_range
a_ : Optional[int] = layer_norm_eps
a_ : str = position_embedding_type
a_ : Union[str, Any] = use_cache
a_ : Any = tie_word_embeddings
a_ : str = num_image_with_embedding
a_ : int = bos_token_id
a_ : Optional[int] = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
a_ : List[Any] = copy.deepcopy(self.__dict__ )
a_ : Tuple = self.vision_config.to_dict()
a_ : Union[str, Any] = self.__class__.model_type
return output
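# Added usage sketch (upstream names: transformers' GitVisionConfig / GitConfig;
# values are arbitrary):
# vision_config = GitVisionConfig(image_size=224, patch_size=16)
# config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
# config.to_dict()  # serializes the text config with the nested vision config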
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by' entry of the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # default key, used whenever no per-call key is supplied
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
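if __name__ == "__main__":
    # Added round-trip demo: XOR is an involution, so encrypting twice with the
    # same key restores the plaintext ((x ^ k) ^ k == x).
    crypt = XORCipher(key=67)
    assert crypt.decrypt_string(crypt.encrypt_string("hallo welt")) == "hallo welt"
    print("XOR round-trip OK")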
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
def sylvester(number: int) -> int:
    """Calculate the number-th term of Sylvester's sequence."""
    assert isinstance(number, int), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
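    # Added sanity check against the known first terms of the sequence.
    assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]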
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
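# Added usage sketch (mirrors the docstring examples above; requires the
# `datasets` package at runtime):
# exact_match = datasets.load_metric("exact_match")
# results = exact_match.compute(references=["the cat"], predictions=["the cat"])
# round(results["exact_match"], 1)  # -> 100.0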
| 666 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f'{self.value}': (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
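    # Added call so the worked example above actually runs alongside the doctests.
    binary_search_tree_example()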
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''), up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''), )
        unet = UNetaDModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
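# Hedged usage sketch (model id taken from the test above; call signature as
# exercised by these tests): generating a mel-spectrogram image plus waveform.
#
# pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''').to(torch_device)
# output = pipe(generator=torch.Generator(device=torch_device).manual_seed(42))
# output.images[0].save('''spectrogram.png''')  # PIL image of the mel spectrogram
# audio = output.audios[0]                      # numpy waveform, shape (channels, samples)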
| 666 | 1 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
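# Illustrative: the sort can be restricted to a sub-range in place, which
# intro_sort relies on for small slices (values below are arbitrary).
#
# >>> insertion_sort([3, 1, 2, 9, 0], start=0, end=3)
# [1, 2, 3, 9, 0]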
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
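# End-to-end behaviour of the hybrid (quicksort until the depth budget or the
# small-slice threshold is hit, then heap sort / insertion sort take over):
#
# >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
# [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]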
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('Enter numbers separated by a comma : ').strip()
unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['''stage2''', '''stage3''', '''stage4'''], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 366
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='''dataset''')), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
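# Illustrative sketch (key names hypothetical): rename_key moves a single entry
# to its new name in place; the conversion loop below applies it per mapped pair.
#
# sd = {'''old.key''': 1}
# rename_key(sd, '''old.key''', '''new.key''')
# assert sd == {'''new.key''': 1}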
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
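# The fused qkv projection has shape (3 * dim, dim); the slices above recover
# query, key and value in order. A tiny numeric sketch (dim = 2, illustrative):
#
# w = torch.arange(12).reshape(6, 2)
# q, k, v = w[:2, :], w[2:4, :], w[-2:, :]
# assert torch.equal(torch.cat([q, k, v]), w)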
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''', filename='''adet_swin_ft.pth''')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''', filename='''deta_swin_pt_o365.pth''')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device))
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
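# Example invocation (script name and output path illustrative):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub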
| 666 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
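# Hedged usage sketch (directory layout illustrative): with audio files stored
# under ``data/{label}/``, the generic "audiofolder" loader infers the
# classification labels from the folder names.
#
# from datasets import load_dataset
# ds = load_dataset("audiofolder", data_dir="data")
# print(ds["train"][0]["audio"], ds["train"][0]["label"])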
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
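# Hedged sketch (checkpoint id from the test above): with eta=0.0 and a fixed
# generator, DDIM sampling is deterministic, which the slice checks rely on.
#
# pipe = DDIMPipeline.from_pretrained('''google/ddpm-cifar10-32''')
# generator = torch.manual_seed(0)
# images = pipe(generator=generator, eta=0.0, output_type='''numpy''').images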
| 666 | 1 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`')
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
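# Hedged usage sketch (names and version illustrative): pop a deprecated kwarg
# while emitting a FutureWarning, the way callers typically use this helper.
#
# def scale_image(image, **kwargs):
#     scale = deprecate("scale", "99.0.0", "Pass `factor` instead.", take_from=kwargs)
#     ...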
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value('''string''') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).')
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''')),
            "translation": Sequence(Value('''string''')),
        }
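# Hedged usage sketch (language codes illustrative): declaring a fixed-language
# translation feature in a dataset schema.
#
# from datasets import Dataset, Features
# features = Features({"translation": Translation(languages=["en", "fr"])})
# ds = Dataset.from_dict({"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features)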
| 666 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token='''<pad>''',
        eos_token='''</s>''',
        unk_token='''<unk>''',
        mask_token='''<mask_2>''',
        mask_token_sent='''<mask_1>''',
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
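# Hedged usage sketch: ids 0 and 1 are reserved for <pad> and </s>, so raw
# SentencePiece ids are shifted by `offset`; EOS is appended on encode.
#
# tok = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
# ids = tok('''Summarize this article.''').input_ids
# assert ids[-1] == tok.eos_token_id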
| 666 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, '''dpr_tokenizer''')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname, '''bart_tokenizer''')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer'''))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''custom''', )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, '''dataset''')
            config.index_path = os.path.join(self.tmpdirname, '''index.faiss''')
            dataset.get_index('''embeddings''').save(os.path.join(self.tmpdirname, '''index.faiss'''))
            dataset.drop_index('''embeddings''')
            dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset'''))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''')
        dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''')
        pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb'''))
        passages_file_name = os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''')
        passages = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, '''wb'''))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''legacy''', index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''text''']), n_docs)
        self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors='''pt''', )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''')), True)  # check for doc token related keys in dictionary.
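# Contract exercised above (shapes only, dataset illustrative): for question
# embeddings of shape (batch, vector_size), `retrieve` returns doc embeddings
# of shape (batch, n_docs, vector_size), the matching doc ids, and doc dicts.
#
# hidden_states = np.ones((1, retriever.config.retrieval_vector_size), dtype=np.float32)
# doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=2)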
| 666 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _UpperCAmelCase ( __A : int ):
a_ : Tuple = botoa.client('''iam''' )
a_ : Tuple = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
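# Explanatory note (added, not part of the original source): the document above
# is a standard AWS trust policy -- it lets the SageMaker service principal
# (sagemaker.amazonaws.com) assume the role created below, which is a
# prerequisite for the permission policy attached further down to take effect.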
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__A , AssumeRolePolicyDocument=json.dumps(__A , indent=2 ) )
a_ : Optional[Any] = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__A , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(__A , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'role {role_name} already exists. Using existing one' )
def _UpperCAmelCase ( __A : Tuple ):
a_ : Optional[Any] = botoa.client('''iam''' )
return iam_client.get_role(RoleName=__A )["Role"]["Arn"]
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , __A , )
a_ : Optional[int] = None
if credentials_configuration == 0:
a_ : int = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
a_ : int = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
a_ : List[str] = _ask_field('''AWS Access Key ID: ''' )
a_ : Optional[int] = aws_access_key_id
a_ : int = _ask_field('''AWS Secret Access Key: ''' )
a_ : Optional[int] = aws_secret_access_key
a_ : Dict = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
a_ : Dict = aws_region
a_ : List[Any] = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , __A , )
if role_management == 0:
a_ : Union[str, Any] = _ask_field('''Enter your IAM role name: ''' )
else:
a_ : List[str] = '''accelerate_sagemaker_execution_role'''
print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(__A )
a_ : List[str] = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
a_ : Union[str, Any] = None
if is_custom_docker_image:
a_ : Union[str, Any] = _ask_field('''Enter your Docker image: ''' , lambda __A : str(__A ).lower() )
a_ : Optional[Any] = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
a_ : Tuple = None
if is_sagemaker_inputs_enabled:
a_ : Optional[int] = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda __A : str(__A ).lower() , )
a_ : Optional[int] = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
a_ : Optional[int] = None
if is_sagemaker_metrics_enabled:
a_ : List[Any] = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda __A : str(__A ).lower() , )
a_ : Optional[int] = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
a_ : Tuple = {}
a_ : Optional[Any] = _ask_field(
'''Do you wish to optimize your script with torch dynamo? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
if use_dynamo:
a_ : str = '''dynamo_'''
a_ : List[str] = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
a_ : Dict = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
if use_custom_options:
a_ : int = _ask_options(
'''Which mode do you want to use?''' , __A , lambda __A : TORCH_DYNAMO_MODES[int(__A )] , default='''default''' , )
a_ : str = _ask_field(
'''Do you want the fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
a_ : str = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=__A , error_message='''Please enter yes or no.''' , )
a_ : Optional[int] = '''Which EC2 instance type do you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
a_ : Optional[Any] = _ask_options(
__A , __A , lambda __A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__A )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
a_ : List[Any] = _ask_field(__A , lambda __A : str(__A ).lower() , default='''ml.p3.2xlarge''' )
a_ : List[Any] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
a_ : Tuple = _ask_field(
'''How many machines do you want to use? [1]: ''' , __A , default=1 , )
a_ : Dict = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=__A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__A , use_cpu=__A , dynamo_config=__A , eca_instance_type=__A , profile=__A , region=__A , iam_role_name=__A , mixed_precision=__A , num_machines=__A , sagemaker_inputs_file=__A , sagemaker_metrics_file=__A , )
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
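# Worked example for the cone formula above (illustrative, not part of the
# original source): with radius 3 and height 4 the slant height is
# (4**2 + 3**2) ** 0.5 == 5, so the surface area is pi * 3 * (3 + 5) == 24 * pi.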
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
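# Note on the torus formula above (illustrative): it is the product of the two
# circumferences involved -- sweeping the tube circle (circumference
# 2 * pi * tube_radius) around the center circle (circumference
# 2 * pi * torus_radius) gives 4 * pi**2 * torus_radius * tube_radius.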
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
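# Worked example of Heron's formula above (illustrative, using the classic
# 3-4-5 right triangle): the semi-perimeter is (3 + 4 + 5) / 2 == 6 and the
# area is sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) == 6.0, which also matches
# base * height / 2 == 3 * 4 / 2.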
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : int ) -> None:
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__lowerCAmelCase = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__lowerCAmelCase = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__lowerCAmelCase = BeautifulSoup(res.text, 'html.parser')
__lowerCAmelCase = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__lowerCAmelCase = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCAmelCase = 'UperNetConfig'
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] , __SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int], str] = 0 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 , ) -> None:
super().__init__()
a_ : Optional[Any] = nn.Convad(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , )
a_ : Any = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = nn.ReLU()
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
a_ : Any = self.conv(__SCREAMING_SNAKE_CASE )
a_ : Dict = self.batch_norm(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self.activation(__SCREAMING_SNAKE_CASE )
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
super().__init__()
a_ : Union[str, Any] = [
nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ),
UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
a_ : str = input
for layer in self.layers:
a_ : List[Any] = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple[int, ...] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool ) -> None:
super().__init__()
a_ : Optional[int] = pool_scales
a_ : Any = align_corners
a_ : Optional[int] = in_channels
a_ : int = channels
a_ : Dict = []
for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ):
a_ : Any = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE )
self.blocks.append(__SCREAMING_SNAKE_CASE )
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> List[torch.Tensor]:
a_ : List[Any] = []
for ppm in self.blocks:
a_ : str = ppm(__SCREAMING_SNAKE_CASE )
a_ : Dict = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__SCREAMING_SNAKE_CASE )
return ppm_outs
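# Note (added for clarity): each pooled map is interpolated back to the
# spatial size of the input feature map, so the returned ppm_outs can later
# be concatenated channel-wise (dim=1) with that feature map in the decode
# head's psp_forward.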
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
super().__init__()
a_ : Optional[Any] = config
a_ : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
a_ : str = in_channels
a_ : Optional[int] = config.hidden_size
a_ : str = False
a_ : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a_ : Optional[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a_ : Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a_ : Tuple = nn.ModuleList()
a_ : Any = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a_ : Dict = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
a_ : List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> Any:
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
a_ : Tuple = inputs[-1]
a_ : List[str] = [x]
psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
a_ : Dict = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
a_ : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE )
return output
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
# build laterals
a_ : Dict = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
# build top-down path
a_ : Any = len(__SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a_ : Optional[Any] = laterals[i - 1].shape[2:]
a_ : List[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
a_ : Tuple = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a_ : str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
a_ : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
a_ : List[str] = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
a_ : str = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 ) -> None:
super().__init__()
a_ : Any = config
a_ : Optional[Any] = config.auxiliary_in_channels
a_ : Optional[Any] = config.auxiliary_channels
a_ : Tuple = config.auxiliary_num_convs
a_ : Tuple = config.auxiliary_concat_input
a_ : Dict = in_index
a_ : str = (kernel_size // 2) * dilation
a_ : List[str] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
a_ : Optional[int] = nn.Identity()
else:
a_ : Optional[int] = nn.Sequential(*__SCREAMING_SNAKE_CASE )
if self.concat_input:
a_ : str = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
a_ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
# just take the relevant feature maps
a_ : str = encoder_hidden_states[self.in_index]
a_ : Optional[int] = self.convs(__SCREAMING_SNAKE_CASE )
if self.concat_input:
a_ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a_ : str = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = UperNetConfig
snake_case__ = "pixel_values"
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : Optional[Any] = value
__lowerCAmelCase = r'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCAmelCase = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
a_ : List[str] = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
a_ : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
a_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions
a_ : Union[str, Any] = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = outputs.feature_maps
a_ : Optional[Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
a_ : Any = None
if self.auxiliary_head is not None:
a_ : Optional[int] = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
a_ : str = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
a_ : List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
a_ : List[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
a_ : Union[str, Any] = (logits,) + outputs[1:]
else:
a_ : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
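# Shape of the helper's output (illustrative note): for partition_order [1, 0]
# it yields pairs like ("1_0", {"id": ...}), ("1_1", {"id": ...}), ..., i.e.
# row ids of the form "<partition id>_<row index within that partition>"
# paired with the row rendered as a dict.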
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
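# Sanity check of the arithmetic above (illustrative): each int64 row is
# 8 bytes, so max_shard_size=16 fits 16 // 8 == 2 rows per partition, and
# 100 rows need 100 // 2 == 50 partitions, matching the assertion.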
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : int=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Dict=[2, 2, 3, 2] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=None , ) -> int:
a_ : Any = parent
a_ : Tuple = batch_size
a_ : Tuple = image_size
a_ : Union[str, Any] = num_channels
a_ : Any = num_stages
a_ : str = hidden_sizes
a_ : Optional[Any] = depths
a_ : List[Any] = is_training
a_ : List[str] = use_labels
a_ : List[Any] = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : Tuple = type_sequence_label_size
a_ : str = initializer_range
a_ : Tuple = out_features
a_ : Optional[Any] = num_labels
a_ : List[str] = scope
a_ : Optional[Any] = num_stages
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Optional[int] = None
if self.use_labels:
a_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=__SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
a_ : str = UperNetForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : Any = self.prepare_config_and_inputs()
a_ , a_ , a_ : Dict = config_and_inputs
a_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
a_ : Union[str, Any] = UperNetModelTester(self )
a_ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = model_class(__SCREAMING_SNAKE_CASE )
a_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Tuple = [*signature.parameters.keys()]
a_ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ):
a_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
a_ : Any = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
a_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Union[str, Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : int = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Tuple = _config_zero_init(__SCREAMING_SNAKE_CASE )
a_ : Any = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ):
a_ : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
a_ : Tuple = Image.open(__A ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
a_ : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
a_ : List[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = prepare_img()
a_ : Union[str, Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
a_ : Tuple = model(**__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
a_ : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
a_ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(__SCREAMING_SNAKE_CASE )
a_ : int = prepare_img()
a_ : str = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
a_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
a_ : str = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
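# Note (added for clarity): BLOOM stores past keys transposed relative to past
# values -- the key shape above is (batch * num_heads, head_dim, past_length)
# while the value shape below is (batch * num_heads, past_length, head_dim).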
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
| 666 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "BlipImageProcessor"
snake_case__ = "AutoTokenizer"
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# add QFormer tokenizer
a_ : Dict = qformer_tokenizer
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : ImageInput = None , __SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : str , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('''You have to specify at least one of images or text.''' )
a_ : List[str] = BatchFeature()
if text is not None:
a_ : str = self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
encoding.update(__SCREAMING_SNAKE_CASE )
a_ : Any = self.qformer_tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
a_ : str = qformer_text_encoding.pop('''input_ids''' )
a_ : str = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
a_ : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
encoding.update(__SCREAMING_SNAKE_CASE )
return encoding
def SCREAMING_SNAKE_CASE ( self : Any , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
a_ : Any = self.tokenizer.model_input_names
a_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Any ) -> Dict:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
return super().save_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , __SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
a_ : Any = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder='''qformer_tokenizer''' )
a_ : List[Any] = cls._get_arguments_from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
args.append(__SCREAMING_SNAKE_CASE )
return cls(*__SCREAMING_SNAKE_CASE )
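As a quick illustration of how the processor ties the three components together, here is a minimal usage sketch; the checkpoint id and image URL are placeholders chosen for the example, not taken from this file.

# Minimal usage sketch (assumes an InstructBLIP checkpoint is available; the id below is an assumption).
import requests
from PIL import Image

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")  # assumed checkpoint id
image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)  # placeholder URL
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# `inputs` now holds pixel_values from the image processor, input_ids/attention_mask from the
# language-model tokenizer, and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.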
| 666 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in s, e.g. str_eval("987654321") == 362880."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n, using a sliding window."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # extend the window by dropping its leading digit and appending the new one
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # the incoming digit is smaller than the window's lead: score this window and jump ahead
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
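The window scan above only evaluates a window when the incoming digit is smaller than the window's leading digit; a simple brute force over every 13-digit substring makes a handy cross-check. This helper is an illustrative addition, not part of the original solution.

def brute_force(n: str = N, window: int = 13) -> int:
    # Evaluate every run of `window` adjacent digits and keep the largest product.
    return max(str_eval(n[i : i + window]) for i in range(len(n) - window + 1))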
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # covered by the shared image-processing test mixin
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        # covered by the shared image-processing test mixin
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (four channels)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the extra channel is dropped by do_convert_rgb
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
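Outside of unittest, the same shape contract can be checked in a few lines. This sketch assumes torch, PIL, and transformers are installed and that the constructor accepts the same kwargs as the image_processor_dict above; it is an illustration, not part of the test suite.

if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import ChineseCLIPImageProcessor

    processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
    img = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
    pixel_values = processor(img, return_tensors="pt").pixel_values
    # batch of 1, RGB channels, crop height, crop width
    assert tuple(pixel_values.shape) == (1, 3, 18, 18)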
| 666 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of lst, assuming lst increases to a single peak and then decreases.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6])
    10
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
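Since a single-peaked list attains its peak at the maximum, a linear max() serves as an O(n) cross-check for the divide-and-conquer version. This helper is an illustrative addition, not part of the original snippet.

def peak_linear(lst: list[int]) -> int:
    # Reference implementation: on a valid single-peaked input, the peak is simply max(lst).
    return max(lst)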
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |