from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_pretrained_model_hub(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
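For orientation, a minimal usage sketch of the pipeline above. The checkpoint name and video path below are illustrative assumptions, not part of this file; running it requires decord installed and a video-classification checkpoint downloaded from the Hub.

# Sketch only: checkpoint and path are placeholders.
from transformers import pipeline

video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")

# `top_k` flows through _sanitize_parameters() into postprocess().
predictions = video_classifier("path/to/video.mp4", top_k=3)
print(predictions)  # [{"score": ..., "label": ...}, ...]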
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_pretrained_model_hub(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model)
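A quick usage sketch for the converter defined above, calling the function directly instead of going through argparse. All paths are placeholders; a real run assumes a downloaded Funnel Transformer TF checkpoint and its config.

# Sketch only: placeholder paths, assuming a Funnel TF checkpoint on disk.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/funnel/model.ckpt",
    config_file="/path/to/funnel/config.json",
    pytorch_dump_path="/path/to/out/pytorch_model.bin",
    base_model=False,  # True to convert only the encoder (no decoder)
)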
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
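A hedged usage sketch for GlueDataset as defined above; the tokenizer checkpoint and data directory are placeholder assumptions, and the GLUE task data must already be on disk.

# Sketch only: placeholder checkpoint and data_dir.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/glue/MRPC", max_seq_length=128)
# `mode` accepts a string, which is converted to the Split enum internally.
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
print(len(train_dataset), train_dataset[0])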
def solution() -> int:
    """
    Returns the product of the digits d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000
    of Champernowne's constant 0.123456789101112...
    """
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
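A constant-memory alternative, included as a sketch: instead of materialising the first million digits as a string, the n-th digit can be located arithmetically from the block of equal-width numbers it falls in. `champernowne_digit` is a name introduced here for illustration.

def champernowne_digit(n: int) -> int:
    # n is 1-indexed into the digit sequence 123456789101112...
    digits, count, start = 1, 9, 1  # current block: `count` numbers of width `digits`, starting at `start`
    while n > digits * count:
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits  # the number containing the digit
    return int(str(number)[(n - 1) % digits])


if __name__ == "__main__":
    from math import prod

    # Should agree with solution() above.
    print(prod(champernowne_digit(10**k) for k in range(7)))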
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image,
return_tensors="""pt""" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase_ : int = model(**lowercase_ ) lowercase_ : List[Any] = outputs.logits # verify the logits lowercase_ : List[Any] = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , lowercase_ ) lowercase_ : List[Any] = torch.tensor( [ [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]], [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]], [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]], ] , device=lowercase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) )
363
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
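For orientation, this is the diffusers dance-diffusion-style audio pipeline; a usage sketch under the assumption that the published harmonai checkpoint is available (the argument names map onto the __call__ signature above):

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# num_inference_steps and audio_length_in_s correspond to the arguments above
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
audio = output.audios[0]  # numpy array, (channels, samples), clamped to [-1, 1]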
21
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = ['''image_processor''', '''tokenizer'''] UpperCamelCase__ = '''FlavaImageProcessor''' UpperCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : str , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , **lowercase_ : int ): lowercase_ : List[str] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase_ , ) lowercase_ : Dict = kwargs.pop("""feature_extractor""" ) lowercase_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase_ , lowercase_ ) lowercase_ : List[str] = self.image_processor def __call__( self : Tuple , lowercase_ : Optional[ImageInput] = None , lowercase_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = False , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Union[str, Any] , ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: lowercase_ : Optional[int] = self.tokenizer( text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) if images is not None: lowercase_ : int = self.image_processor( lowercase_ , return_image_mask=lowercase_ , return_codebook_pixels=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) if text is not None and images is not None: encoding.update(lowercase_ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ): return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : str ): return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = self.tokenizer.model_input_names lowercase_ : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase_ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase_ , ) return self.image_processor
364
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
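The script above is meant to run from the repository root; per its own argparse block, the two modes are:

# verify only (raises if the table in docs/source/en/index.md is stale):
#   python utils/check_table.py
# rewrite the table in place:
#   python utils/check_table.py --fix_and_overwrite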
21
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Tuple: """simple docstring""" lowercase_ : Any = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = StableDiffusionLatentUpscalePipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCamelCase__ = frozenset([]) UpperCamelCase__ = True @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = 1 lowercase_ : Union[str, Any] = 4 lowercase_ : Optional[Any] = (16, 16) lowercase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ ) return image def SCREAMING_SNAKE_CASE_ ( self : Tuple ): torch.manual_seed(0 ) lowercase_ : int = UNetaDConditionModel( act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=lowercase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( """KDownBlock2D""", """KCrossAttnDownBlock2D""", """KCrossAttnDownBlock2D""", """KCrossAttnDownBlock2D""", ) , in_channels=8 , mid_block_type=lowercase_ , only_cross_attention=lowercase_ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , ) lowercase_ : List[str] = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ """DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D""", ] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) lowercase_ : int = EulerDiscreteScheduler(prediction_type="""sample""" ) lowercase_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , ) lowercase_ : int = CLIPTextModel(lowercase_ ) lowercase_ : Tuple = 
CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ : List[str] = { """unet""": model.eval(), """vae""": vae.eval(), """scheduler""": scheduler, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[Any]=0 ): if str(lowercase_ ).startswith("""mps""" ): lowercase_ : Optional[int] = torch.manual_seed(lowercase_ ) else: lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : str = { """prompt""": """A painting of a squirrel eating a burger""", """image""": self.dummy_image.cpu(), """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[Any] = """cpu""" lowercase_ : Any = self.get_dummy_components() lowercase_ : str = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[int] = self.get_dummy_inputs(lowercase_ ) lowercase_ : List[Any] = pipe(**lowercase_ ).images lowercase_ : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) lowercase_ : Any = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) lowercase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def SCREAMING_SNAKE_CASE_ ( self : str ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : str ): super().test_save_load_local(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : int ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = [ """DDIMScheduler""", """DDPMScheduler""", """PNDMScheduler""", """HeunDiscreteScheduler""", """EulerAncestralDiscreteScheduler""", """KDPM2DiscreteScheduler""", """KDPM2AncestralDiscreteScheduler""", """DPMSolverSDEScheduler""", ] lowercase_ : Union[str, Any] = self.get_dummy_components() lowercase_ : Dict = self.pipeline_class(**lowercase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Dict = self.get_dummy_inputs(lowercase_ ) lowercase_ : Optional[int] = 2 lowercase_ : int = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue lowercase_ : Union[str, Any] = getattr(lowercase_ , scheduler_enum.name ) lowercase_ : List[str] = scheduler_cls.from_config(pipe.scheduler.config ) lowercase_ : Union[str, Any] = pipe(**lowercase_ )[0] outputs.append(lowercase_ ) assert check_same_shape(lowercase_ ) @require_torch_gpu @slow class __magic_name__ ( unittest.TestCase): def 
SCREAMING_SNAKE_CASE_ ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = torch.manual_seed(33 ) lowercase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowercase_ : Any = StableDiffusionLatentUpscalePipeline.from_pretrained( """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa ) upscaler.to("""cuda""" ) lowercase_ : List[Any] = """a photo of an astronaut high resolution, unreal engine, ultra realistic""" lowercase_ : int = pipe(lowercase_ , generator=lowercase_ , output_type="""latent""" ).images lowercase_ : Optional[int] = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0] lowercase_ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = torch.manual_seed(33 ) lowercase_ : str = StableDiffusionLatentUpscalePipeline.from_pretrained( """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa ) upscaler.to("""cuda""" ) lowercase_ : Union[str, Any] = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas""" lowercase_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" ) lowercase_ : Dict = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0] lowercase_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" ) assert np.abs((expected_image - image).max() ) < 5E-2
365
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
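A small usage sketch for the helpers just reconstructed; the spinner loop is illustrative only:

import time

with hidden_cursor():
    for frame in "|/-\\" * 5:
        print(f"\rworking {frame}", end="", flush=True)
        time.sleep(0.1)
print("\rdone      ")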
21
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Any = logging.get_logger(__name__) _lowercase : Optional[int] = { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json" ), } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''xlm-roberta''' def __init__( self : Any , lowercase_ : Dict=30522 , lowercase_ : Optional[Any]=768 , lowercase_ : Any=12 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=3072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Union[str, Any]=2 , lowercase_ : Tuple=0.02 , lowercase_ : str=1E-12 , lowercase_ : Union[str, Any]=1 , lowercase_ : Union[str, Any]=0 , lowercase_ : str=2 , lowercase_ : Any="absolute" , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=None , **lowercase_ : List[Any] , ): super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) lowercase_ : int = vocab_size lowercase_ : List[Any] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : str = hidden_act lowercase_ : List[Any] = intermediate_size lowercase_ : Tuple = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Any = type_vocab_size lowercase_ : Dict = initializer_range lowercase_ : Any = layer_norm_eps lowercase_ : Dict = position_embedding_type lowercase_ : str = use_cache lowercase_ : Dict = classifier_dropout class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): if self.task == "multiple-choice": lowercase_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
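A configuration with these fields can be built directly; a minimal sketch using the matching model class from transformers:

from transformers import XLMRobertaConfig, XLMRobertaModel

config = XLMRobertaConfig()  # defaults mirror the __init__ signature above
model = XLMRobertaModel(config)  # randomly initialised weights
print(config.vocab_size, config.hidden_size, config.position_embedding_type)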
366
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
21
0
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool: lowercase_ : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> tuple[int, int]: lowercase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowercase_ : int = x_den * y_den * z_den lowercase_ : int = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase ( UpperCAmelCase__ : int = 35 ) -> int: lowercase_ : set = set() lowercase_ : int lowercase_ : Fraction = Fraction(0 ) lowercase_ : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowercase_ : Union[str, Any] = x_num * y_den + x_den * y_num lowercase_ : int = x_den * y_den lowercase_ : List[Any] = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Optional[Any] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=2 lowercase_ : Any = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowercase_ : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ): lowercase_ : Optional[Any] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Dict = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Any = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Any = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=-1 lowercase_ : List[str] = x_num * y_num lowercase_ : Optional[Any] = x_den * y_num + x_num * y_den lowercase_ : Dict = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : List[str] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) # n=2 lowercase_ : Optional[Any] = x_num * x_num * y_num * y_num lowercase_ : Optional[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ): lowercase_ : List[str] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Optional[int] = int(sqrt(UpperCAmelCase__ ) ) lowercase_ : Dict = gcd(UpperCAmelCase__ , UpperCAmelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ : Union[str, Any] = add_three( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) unique_s.add(UpperCAmelCase__ ) for num, den in unique_s: total += Fraction(UpperCAmelCase__ , UpperCAmelCase__ ) return total.denominator + total.numerator if __name__ == "__main__": print(f"""{solution() = }""")
367
'''simple docstring'''
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
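Two worked inputs for the evaluator above:

# (3 + 4) * 5 in postfix is "3 4 + 5 *"
print(evaluate_postfix(["3", "4", "+", "5", "*"]))  # 35
# division truncates toward zero, courtesy of the a // b + 1 correction
print(evaluate_postfix(["-7", "2", "/"]))  # -3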
21
0
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training program/script to be launched in parallel, "
            "followed by all the arguments for the training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
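A typical invocation, with a hypothetical training script; the launched module must expose an _mp_fn entry point for xmp.spawn to call on each core:

# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
# Everything after the script path is forwarded verbatim, plus an extra
# --tpu_num_cores argument injected by the launcher.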
368
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
21
0
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
369
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
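Spot values for a quick sanity check, assuming the TheAlgorithms-style helpers above (where is_square_free takes the factor list):

# mu(1) = 1; mu(p) = -1; mu(pq) = 1; mu(pqr) = -1; mu(n) = 0 when a squared prime divides n
for n, expected in [(1, 1), (2, -1), (6, 1), (30, -1), (12, 0)]:
    assert mobius(n) == expected, (n, mobius(n))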
21
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
370
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int = 1000000 ) -> int:
    lowercase_ : List[Any] = limit + 1
    lowercase_ : Optional[Any] = [0] * limit
    for first_term in range(1 , UpperCAmelCase__ ):
        for n in range(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
            lowercase_ : List[Any] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a
    lowercase_ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
    return count


if __name__ == "__main__":
    print(f"""{solution() = }""")
21
0
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Optional[int]: lowercase_ : List[Any] = [] for part_id in partition_order: lowercase_ : Optional[int] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(UpperCAmelCase__ ): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : Any = spark.range(100 ).repartition(1 ) lowercase_ : Optional[int] = Spark(UpperCAmelCase__ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> int: lowercase_ : List[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : List[str] = spark.range(10 ).repartition(2 ) lowercase_ : int = [1, 0] lowercase_ : Any = _generate_iterable_examples(UpperCAmelCase__ , UpperCAmelCase__ ) # Reverse the partitions. lowercase_ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__ , UpperCAmelCase__ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase_ : Union[str, Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : Dict = spark.range(10 ).repartition(1 ) lowercase_ : List[Any] = SparkExamplesIterable(UpperCAmelCase__ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Tuple: lowercase_ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : int = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: lowercase_ : Tuple = lambda UpperCAmelCase__ : x.reverse() lowercase_ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__ , [2, 1, 0] ) lowercase_ : Union[str, Any] = SparkExamplesIterable(UpperCAmelCase__ ).shuffle_data_sources(UpperCAmelCase__ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ): lowercase_ : Any = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> List[Any]: lowercase_ : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : int = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase_ : Union[str, Any] = SparkExamplesIterable(UpperCAmelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase_ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__ , [0, 2] ) for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ): lowercase_ : Any = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase_ : Optional[Any] = SparkExamplesIterable(UpperCAmelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCAmelCase__ , [1, 3] ) for i, (row_id, row_dict) in enumerate(UpperCAmelCase__ ): lowercase_ : Tuple = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Union[str, Any]: lowercase_ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() lowercase_ : Dict = spark.range(100 ).repartition(1 ) lowercase_ : str = Spark(UpperCAmelCase__ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
371
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
21
0
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int ) -> list[int]:
    if length <= 0 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
        raise ValueError("""Length must be a positive integer.""" )
    return [n * (2 * n - 1) for n in range(UpperCAmelCase__ )]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
350
'''simple docstring'''
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
    # Initialise PyTorch model
    lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
    print(F'''Building PyTorch model from configuration: {config}''' )
    lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , UpperCAmelCase__ )


if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    _lowercase : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
21
0
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
    lowercase_ : List[Any] = x
    lowercase_ : Any = y
    for step in range(UpperCAmelCase__ ):  # noqa: B007
        lowercase_ : Dict = a * a - b * b + x
        lowercase_ : str = 2 * a * b + y
        lowercase_ : Optional[Any] = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )


def lowerCamelCase (
    UpperCAmelCase__ : int = 800 ,
    UpperCAmelCase__ : int = 600 ,
    UpperCAmelCase__ : float = -0.6 ,
    UpperCAmelCase__ : float = 0 ,
    UpperCAmelCase__ : float = 3.2 ,
    UpperCAmelCase__ : int = 50 ,
    UpperCAmelCase__ : bool = True ,
) -> Image.Image:
    lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
    lowercase_ : Tuple = img.load()

    # loop through the image-coordinates
    for image_x in range(UpperCAmelCase__ ):
        for image_y in range(UpperCAmelCase__ ):
            # determine the figure-coordinates based on the image-coordinates
            lowercase_ : Any = figure_width / image_width * image_height
            lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
            lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
            lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
            else:
                lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    _lowercase : List[str] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
351
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
21
0
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger _lowercase : Any = "<<<<<<< This should probably be modified because it mentions: " _lowercase : Any = "=======\n>>>>>>>\n" _lowercase : Dict = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] _lowercase : List[str] = [ # (pattern, replacement) # Order is important here for some replacements (r"tfds\.core", r"datasets"), (r"tf\.io\.gfile\.GFile", r"open"), (r"tf\.([\w\d]+)", r"datasets.Value('\1')"), (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"), (r"tfds\.features\.Text\(", r"datasets.Value('string'),"), (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("), (r"tfds\.features\.FeaturesDict\(", r"dict("), (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (r"tfds\.", r"datasets."), (r"dl_manager\.manual_dir", r"self.config.data_dir"), (r"self\.builder_config", r"self.config"), ] def lowerCamelCase ( UpperCAmelCase__ : Namespace ) -> Dict: return ConvertCommand(args.tfds_path , args.datasets_directory ) class __magic_name__ ( _UpperCAmelCase): @staticmethod def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ): lowercase_ : List[Any] = parser.add_parser( """convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , ) train_parser.add_argument( """--tfds_path""" , type=lowercase_ , required=lowercase_ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , ) train_parser.add_argument( """--datasets_directory""" , type=lowercase_ , required=lowercase_ , help="""Path to the HuggingFace Datasets folder.""" ) train_parser.set_defaults(func=lowercase_ ) def __init__( self : Tuple , lowercase_ : str , lowercase_ : str , *lowercase_ : Tuple ): lowercase_ : Optional[int] = get_logger("""datasets-cli/converting""" ) lowercase_ : List[Any] = tfds_path lowercase_ : Any = datasets_directory def SCREAMING_SNAKE_CASE_ ( self : int ): if os.path.isdir(self._tfds_path ): lowercase_ : Dict = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase_ : List[str] = os.path.dirname(self._tfds_path ) else: raise ValueError("""--tfds_path is neither a directory nor a file. 
Please check path.""" ) lowercase_ : Any = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowercase_ : Union[str, Any] = [] lowercase_ : List[str] = [] lowercase_ : Union[str, Any] = {} if os.path.isdir(self._tfds_path ): lowercase_ : Dict = os.listdir(lowercase_ ) else: lowercase_ : List[Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) lowercase_ : Dict = os.path.join(lowercase_ , lowercase_ ) lowercase_ : List[Any] = os.path.join(lowercase_ , lowercase_ ) if not os.path.isfile(lowercase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("""Skipping file""" ) continue with open(lowercase_ , encoding="""utf-8""" ) as f: lowercase_ : str = f.readlines() lowercase_ : Any = [] lowercase_ : List[Any] = False lowercase_ : Any = False lowercase_ : str = [] for line in lines: lowercase_ : Dict = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase_ : int = """import datasets\n""" elif "import tensorflow" in out_line: # order is important here lowercase_ : Dict = """""" continue elif "from absl import logging" in out_line: lowercase_ : int = """from datasets import logging\n""" elif "getLogger" in out_line: lowercase_ : Dict = out_line.replace("""getLogger""" , """get_logger""" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase_ : Optional[int] = True lowercase_ : List[Any] = list(filter(lambda lowercase_ : e in out_line , lowercase_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase_ ) + """\n""" ) out_lines.append(lowercase_ ) out_lines.append(lowercase_ ) continue else: for pattern, replacement in TO_CONVERT: lowercase_ : Union[str, Any] = re.sub(lowercase_ , lowercase_ , lowercase_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase_ : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowercase_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) ) lowercase_ : Tuple = """from . import """ + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase_ : Optional[int] = True out_lines.append(lowercase_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase_ : int = f_name.replace(""".py""" , """""" ) lowercase_ : int = os.path.join(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = os.path.join(lowercase_ , lowercase_ ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowercase_ ) if needs_manual_update: with_manual_update.append(lowercase_ ) with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f: f.writelines(lowercase_ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowercase_ : str = os.path.basename(lowercase_ ) lowercase_ : List[str] = imports_to_builder_map[f_name.replace(""".py""" , """""" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowercase_ , lowercase_ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
352
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
    lowercase_ : List[Any] = x
    lowercase_ : Any = y
    for step in range(UpperCAmelCase__ ):  # noqa: B007
        lowercase_ : Dict = a * a - b * b + x
        lowercase_ : str = 2 * a * b + y
        lowercase_ : Optional[Any] = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )


def lowerCamelCase (
    UpperCAmelCase__ : int = 800 ,
    UpperCAmelCase__ : int = 600 ,
    UpperCAmelCase__ : float = -0.6 ,
    UpperCAmelCase__ : float = 0 ,
    UpperCAmelCase__ : float = 3.2 ,
    UpperCAmelCase__ : int = 50 ,
    UpperCAmelCase__ : bool = True ,
) -> Image.Image:
    lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
    lowercase_ : Tuple = img.load()

    # loop through the image-coordinates
    for image_x in range(UpperCAmelCase__ ):
        for image_y in range(UpperCAmelCase__ ):
            # determine the figure-coordinates based on the image-coordinates
            lowercase_ : Any = figure_width / image_width * image_height
            lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
            lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
            lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
            else:
                lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    _lowercase : List[str] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
21
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase : int = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : str ) -> YolosConfig: lowercase_ : Any = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase_ : Dict = 192 lowercase_ : List[Any] = 768 lowercase_ : Dict = 12 lowercase_ : List[str] = 3 lowercase_ : List[str] = [800, 1333] lowercase_ : Tuple = False elif yolos_name == "yolos_s_dWr": lowercase_ : str = 330 lowercase_ : Tuple = 14 lowercase_ : Optional[int] = 6 lowercase_ : List[Any] = 1320 elif "yolos_s" in yolos_name: lowercase_ : Any = 384 lowercase_ : List[Any] = 1536 lowercase_ : Union[str, Any] = 12 lowercase_ : int = 6 elif "yolos_b" in yolos_name: lowercase_ : List[Any] = [800, 1344] lowercase_ : int = 91 lowercase_ : str = """huggingface/label-files""" lowercase_ : List[str] = """coco-detection-id2label.json""" lowercase_ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowercase_ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} lowercase_ : Optional[Any] = idalabel lowercase_ : List[Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosConfig , UpperCAmelCase__ : bool = False ) -> Any: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase_ : Optional[int] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) lowercase_ : str = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : Optional[Any] = in_proj_weight[: config.hidden_size, :] lowercase_ : Union[str, Any] = in_proj_bias[: config.hidden_size] lowercase_ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase_ : Tuple = in_proj_weight[-config.hidden_size :, :] lowercase_ : List[str] = in_proj_bias[-config.hidden_size :] def lowerCamelCase ( UpperCAmelCase__ : str ) -> str: if "backbone" in name: lowercase_ : Union[str, Any] = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: lowercase_ : int = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: lowercase_ : Optional[int] = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: lowercase_ : List[str] = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: lowercase_ : int = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: lowercase_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: lowercase_ : int = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: lowercase_ : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase_ : Optional[int] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase_ : Union[str, Any] = name.replace("""norm1""" , 
"""layernorm_before""" ) if "norm2" in name: lowercase_ : Any = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase_ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase_ : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: lowercase_ : int = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: lowercase_ : Optional[Any] = name.replace("""bbox_embed""" , """bbox_predictor""" ) if "vit.norm" in name: lowercase_ : Optional[int] = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosForObjectDetection ) -> dict: for key in orig_state_dict.copy().keys(): lowercase_ : Optional[int] = orig_state_dict.pop(UpperCAmelCase__ ) if "qkv" in key: lowercase_ : str = key.split(""".""" ) lowercase_ : Union[str, Any] = int(key_split[2] ) lowercase_ : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase_ : Union[str, Any] = val[:dim, :] lowercase_ : Optional[Any] = val[ dim : dim * 2, : ] lowercase_ : Any = val[-dim:, :] else: lowercase_ : List[str] = val[:dim] lowercase_ : Any = val[dim : dim * 2] lowercase_ : Optional[Any] = val[-dim:] else: lowercase_ : Union[str, Any] = val return orig_state_dict def lowerCamelCase ( ) -> torch.Tensor: lowercase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ : List[str] = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ) -> Optional[Any]: lowercase_ : int = get_yolos_config(UpperCAmelCase__ ) # load original state_dict lowercase_ : List[str] = torch.load(UpperCAmelCase__ , map_location="""cpu""" )["""model"""] # load 🤗 model lowercase_ : str = YolosForObjectDetection(UpperCAmelCase__ ) model.eval() lowercase_ : Dict = convert_state_dict(UpperCAmelCase__ , UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) # Check outputs on an image, prepared by YolosImageProcessor lowercase_ : Union[str, Any] = 800 if yolos_name != """yolos_ti""" else 512 lowercase_ : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=UpperCAmelCase__ ) lowercase_ : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase_ : List[Any] = model(**UpperCAmelCase__ ) lowercase_ : str = outputs.logits, outputs.pred_boxes lowercase_ : Dict = None, None if yolos_name == "yolos_ti": lowercase_ : Dict = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) lowercase_ : Optional[Any] = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": lowercase_ : Any = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) lowercase_ : List[Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": lowercase_ : List[str] = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) lowercase_ : Optional[int] = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == 
"yolos_s_dWr": lowercase_ : List[str] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) lowercase_ : str = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": lowercase_ : Tuple = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) lowercase_ : str = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase__ ) if push_to_hub: lowercase_ : Any = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) lowercase_ : List[Any] = model_mapping[yolos_name] image_processor.push_to_hub(UpperCAmelCase__ , organization="""hustvl""" ) model.push_to_hub(UpperCAmelCase__ , organization="""hustvl""" ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Tuple = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
353
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = DistilBertTokenizer
    UpperCamelCase__ = DistilBertTokenizerFast
    UpperCamelCase__ = True

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
        lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )
        lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
21
0
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        lowercase_ : int = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        lowercase_ : int = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] ,
            dtype=tf.intaa ,
        )  # J'aime le camembert !"
        lowercase_ : Optional[Any] = model(lowercase_ )["""last_hidden_state"""]
        lowercase_ : Tuple = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , lowercase_ )
        # compare the actual values for a slice.
        lowercase_ : Optional[int] = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] ,
            dtype=tf.floataa ,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
354
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : str = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
0
'''simple docstring'''
import torch


def lowerCamelCase ( ) -> List[str]:
    if torch.cuda.is_available():
        lowercase_ : Any = torch.cuda.device_count()
    else:
        lowercase_ : Any = 0
    print(F'''Successfully ran on {num_gpus} GPUs''' )


if __name__ == "__main__":
    main()
355
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_lowercase : Union[str, Any] = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Union[str, Any] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
0
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 _lowercase : Dict = sys.version_info >= (3, 10) def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=UpperCAmelCase__ ) @dataclass class __magic_name__ : UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class __magic_name__ : UpperCamelCase__ = 42 UpperCamelCase__ = field(default='''toto''', metadata={'''help''': '''help message'''}) @dataclass class __magic_name__ : UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''titi''' UpperCamelCase__ = '''toto''' class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''titi''' UpperCamelCase__ = '''toto''' UpperCamelCase__ = 42 @dataclass class __magic_name__ : UpperCamelCase__ = '''toto''' def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Optional[Any] = BasicEnum(self.foo ) @dataclass class __magic_name__ : UpperCamelCase__ = '''toto''' def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class __magic_name__ : UpperCamelCase__ = None UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) @dataclass class __magic_name__ : UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello''']) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __magic_name__ : UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = BasicEnum(self.required_enum ) @dataclass class __magic_name__ : UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default='''toto''', metadata={'''help''': '''help message'''}) UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello''']) if is_python_no_less_than_3_10: @dataclass class __magic_name__ : UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class __magic_name__ : UpperCamelCase__ = None UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ): self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowercase_ : str = {k: v for k, v in vars(lowercase_ ).items() if k != """container"""} lowercase_ : int = {k: v for k, v in vars(lowercase_ ).items() if k != """container"""} # Choices 
with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , lowercase_ ) and yy.get("""choices""" , lowercase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](lowercase_ ) , yy["""type"""](lowercase_ ) ) del xx["type"], yy["type"] self.assertEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Dict = HfArgumentParser(lowercase_ ) lowercase_ : Tuple = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=lowercase_ , required=lowercase_ ) expected.add_argument("""--bar""" , type=lowercase_ , required=lowercase_ ) expected.add_argument("""--baz""" , type=lowercase_ , required=lowercase_ ) expected.add_argument("""--flag""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : int = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] (lowercase_ ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ ) self.assertFalse(example.flag ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = HfArgumentParser(lowercase_ ) lowercase_ : List[Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=lowercase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" ) self.argparsersEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : List[str] = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ ) lowercase_ : int = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase_ ) for dataclass_type in dataclass_types: lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = parser.parse_args([] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) ) lowercase_ : List[Any] = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) ) lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) ) lowercase_ : Tuple = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) ) lowercase_ : Any = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = HfArgumentParser(lowercase_ ) lowercase_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] 
, type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) lowercase_ : int = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowercase_ : str = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) lowercase_ : Tuple = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowercase_ : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) lowercase_ : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): @dataclass class __magic_name__ : UpperCamelCase__ = '''toto''' lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ ) lowercase_ : Any = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : List[Any] = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) lowercase_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = HfArgumentParser(lowercase_ ) lowercase_ : List[str] = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase_ ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : Dict = parser.parse_args([] ) self.assertEqual( lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) lowercase_ : List[str] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Dict = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=lowercase_ , type=lowercase_ ) expected.add_argument("""--bar""" , default=lowercase_ , type=lowercase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=lowercase_ , type=lowercase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase_ ) lowercase_ : Dict = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase_ ) for dataclass_type in dataclass_types: lowercase_ : Any = HfArgumentParser(lowercase_ ) self.argparsersEqual(lowercase_ , lowercase_ ) lowercase_ : Dict = parser.parse_args([] ) self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) ) lowercase_ : int 
= parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = HfArgumentParser(lowercase_ ) lowercase_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase_ , required=lowercase_ ) expected.add_argument("""--required_str""" , type=lowercase_ , required=lowercase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , ) self.argparsersEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Any = HfArgumentParser(lowercase_ ) lowercase_ : Tuple = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=lowercase_ , required=lowercase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , ) expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ ) self.argparsersEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Any = HfArgumentParser(lowercase_ ) lowercase_ : List[str] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } lowercase_ : str = parser.parse_dict(lowercase_ )[0] lowercase_ : Any = BasicExample(**lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : str = HfArgumentParser(lowercase_ ) lowercase_ : Any = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[int] = HfArgumentParser(lowercase_ ) lowercase_ : Optional[Any] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : List[str] = os.path.join(lowercase_ , """temp_json""" ) os.mkdir(lowercase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] lowercase_ : Union[str, Any] = BasicExample(**lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Dict = HfArgumentParser(lowercase_ ) lowercase_ : int = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = os.path.join(lowercase_ , """temp_yaml""" ) os.mkdir(lowercase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(lowercase_ , lowercase_ ) lowercase_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] lowercase_ : Any = BasicExample(**lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = HfArgumentParser(lowercase_ ) self.assertIsNotNone(lowercase_ )
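# A minimal usage sketch for HfArgumentParser, matching the behaviour the
# tests above assert. The `ScriptArguments` dataclass here is illustrative,
# not one of the fixtures used by the test suite.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ScriptArguments:
    foo: int
    bar: float
    baz: str = "toto"
    flag: bool = field(default=False)


parser = HfArgumentParser(ScriptArguments)
(args,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5"])
assert args.foo == 1 and args.baz == "toto" and args.flag is False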
"""Remove duplicated initializers from an ONNX model to shrink the saved file."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # TensorProto equality must ignore the (unique) names, so blank them out,
    # compare, then restore.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    # Subgraphs of control-flow nodes must be rewritten as well.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Remove the duplicated initializer and point every consumer at the
        # reference copy instead.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(model_path):
    model_file_folder = os.path.dirname(model_path)
    model_file_name = os.path.basename(model_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
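# A short sketch of invoking the helper above on a saved model; the path is
# illustrative. The optimized copy is written next to the original with an
# "optimized_" prefix.
optimized_path = remove_dup_initializers("exported/encoder.onnx")
print("optimized model written to", optimized_path)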
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : int = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowercase : Any = 256047 _lowercase : List[str] = 256145 @require_sentencepiece @require_tokenizers class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = NllbTokenizer UpperCamelCase__ = NllbTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True UpperCamelCase__ = {} def SCREAMING_SNAKE_CASE_ ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing lowercase_ : Union[str, Any] = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) lowercase_ : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowercase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase_ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Optional[int] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Tuple = tempfile.mkdtemp() lowercase_ : List[Any] = tokenizer_r.save_pretrained(lowercase_ ) lowercase_ : Dict = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in 
tokenizer_r_files ) ) lowercase_ : List[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowercase_ : Dict = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True lowercase_ : List[Any] = tempfile.mkdtemp() lowercase_ : Tuple = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowercase_ : Union[str, Any] = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowercase_ : Any = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : List[Any] = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False lowercase_ : int = tempfile.mkdtemp() lowercase_ : Any = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowercase_ : Tuple = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowercase_ : List[Any] = tokenizer_r.from_pretrained(lowercase_ ) lowercase_ : Any = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : str ): if not self.test_seqaseq: return lowercase_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
lowercase_ : Any = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] lowercase_ : Any = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: lowercase_ : List[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , tgt_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified lowercase_ : int = tokenizer.prepare_seqaseq_batch( lowercase_ , tgt_texts=lowercase_ , max_length=3 , return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) lowercase_ : List[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("""decoder_input_ids""" , lowercase_ ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Dict = [AddedToken("""<special>""" , lstrip=lowercase_ )] lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowercase_ : Tuple = tokenizer_r.encode("""Hey this is a <special> token""" ) lowercase_ : List[Any] = tokenizer_r.encode("""<special>""" , add_special_tokens=lowercase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: lowercase_ : int = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) lowercase_ : str = self.tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowercase_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" ) lowercase_ : int = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): UpperCamelCase__ = '''facebook/nllb-200-distilled-600M''' UpperCamelCase__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will 
only worsen the violence and misery for millions of people.''', ] UpperCamelCase__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] UpperCamelCase__ = [ 25_6047, 1_6297, 13_4408, 8165, 24_8066, 1_4734, 950, 1135, 10_5721, 3573, 83, 2_7352, 108, 4_9486, 2, ] @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ): lowercase_ : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" ) lowercase_ : Any = 1 return cls def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): self.assertIn(lowercase_ , self.tokenizer.all_special_ids ) # fmt: off lowercase_ : List[str] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on lowercase_ : Optional[Any] = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ ) lowercase_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertNotIn(self.tokenizer.eos_token , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Tuple = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowercase_ ) lowercase_ : List[str] = 10 lowercase_ : Dict = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowercase_ ) self.assertEqual(len(lowercase_ ) , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256203, 3] ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[Any] = tempfile.mkdtemp() lowercase_ : Any = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) lowercase_ : Tuple = NllbTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) lowercase_ : Tuple = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) lowercase_ : Any = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) self.assertEqual(lowercase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset 
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[Any] = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors="""pt""" ) lowercase_ : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors="""pt""" ) lowercase_ : Optional[Any] = targets["""input_ids"""] lowercase_ : int = shift_tokens_right( lowercase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(lowercase_ ) , { # A, test, EOS, en_XX """input_ids""": [[256047, 70, 7356, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 256057, } , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Any = True lowercase_ : Optional[int] = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) lowercase_ : List[str] = False lowercase_ : Optional[Any] = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
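# A minimal usage sketch for the tokenizer exercised above; the checkpoint is
# the same one the integration tests load (downloading it requires network
# access).
from transformers import NllbTokenizer

nllb_tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
batch = nllb_tokenizer(
    ["UN Chief says there is no military solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# Depending on `legacy_behaviour`, the source language code is appended after
# </s> or prepended to the sequence, as the last test above demonstrates.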
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
"""Solve a system of linear equations with the Jacobi iteration method."""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
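# Worked example for the solver above: a strictly diagonally dominant 3x3
# system iterated three times from the given initial guess. The third
# iterate, computed by hand, is [0.909375, -1.14375, -0.7484375].
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))
# [0.909375, -1.14375, -0.7484375]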
"""Disjoint set (union-find) that tracks the size of the largest set."""


class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # Initialize with the number of items in each set and rank = 1 for
        # every set.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets with the union-by-rank heuristic; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
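# Usage sketch for the union-find above: three singleton sets are merged one
# by one, so the largest set grows from 1 to 3.
ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
assert ds.max_set == 3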
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowercase : List[Any] = 16 _lowercase : List[str] = 32 def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 ): lowercase_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase_ : int = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCAmelCase__ : int ): # max_length=None => use the model max length (it's actually the default) lowercase_ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ : Optional[int] = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCAmelCase__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ : List[str] = 16 elif accelerator.mixed_precision != "no": lowercase_ : Union[str, Any] = 8 else: lowercase_ : List[str] = None return tokenizer.pad( UpperCAmelCase__ , padding="""longest""" , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase_ : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) lowercase_ : Tuple = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowercase : Optional[int] = mocked_dataloaders # noqa: F811 def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCAmelCase__ ) == "1": lowercase_ : List[str] = 2 # Initialize accelerator lowercase_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : List[Any] = config["""lr"""] lowercase_ : Optional[Any] = int(config["""num_epochs"""] ) lowercase_ : Optional[int] = int(config["""seed"""] ) lowercase_ : Any = int(config["""batch_size"""] ) lowercase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=UpperCAmelCase__ ) def inner_training_loop(UpperCAmelCase__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(UpperCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer lowercase_ : Union[str, Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase__ ) lowercase_ : int = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ ) # Instantiate scheduler lowercase_ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ : Optional[int] = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Now we train the model for epoch in range(UpperCAmelCase__ ): model.train() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase_ : str = model(**UpperCAmelCase__ ) lowercase_ : Tuple = outputs.loss accelerator.backward(UpperCAmelCase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ : Tuple = model(**UpperCAmelCase__ ) lowercase_ : int = outputs.logits.argmax(dim=-1 ) lowercase_ : List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , ) lowercase_ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def lowerCamelCase ( ): lowercase_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowercase_ : int = parser.parse_args() lowercase_ : Dict = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": main()
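# A stripped-down sketch of the decorator used above, outside the GLUE
# example: the wrapped function is re-run with a progressively smaller batch
# size whenever it raises an out-of-memory error, and is called with no
# arguments because the decorator injects `batch_size` itself.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def run_training(batch_size):
    print(f"trying batch_size={batch_size}")
    ...  # build dataloaders and train at this batch size


run_training()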
"""Video classification pipeline using any model with a video classification head."""
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Sample `num_frames` evenly spaced frames at the requested rate.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
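# A usage sketch for the pipeline above; the checkpoint name is illustrative
# and `decord` must be installed for video decoding.
from transformers import pipeline

video_classifier = pipeline(
    "video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics"
)
predictions = video_classifier("archery.mp4", top_k=3)
# -> [{"score": ..., "label": ...}, ...]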
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Jaccard similarity: size of the intersection divided by the size of the
    union of two collections (or, with alternative_union=True, by the sum of
    the two sizes).
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Build the union explicitly to preserve duplicates in lists.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
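# Worked check for the example sets above: the intersection is
# {"c", "d", "e"} (3 elements) and the union has 8 elements, so the
# similarity is 3 / 8 = 0.375.
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375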
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
21
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
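# Hypothetical round-trip example (added, not from the original file): instantiating the
# config above and checking the attribute_map aliases; the concrete values are arbitrary.
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(state_dim=11, act_dim=3, n_layer=2)
assert config.num_hidden_layers == config.n_layer == 2  # resolved through attribute_map
print(config.to_json_string())  # serializes exactly the attributes set in __init__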
361
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
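# Illustrative sketch (an assumption added for clarity, not part of the tests): ImageGPT's
# image processor maps each normalized pixel to its nearest color cluster, which is why the
# tests above assert `clusters` exists and the encodings are (batch, 1024) LongTensors.
# The nearest-cluster lookup in plain numpy, with toy clusters rounded from the tester's:
import numpy as np

clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])  # 2 toy clusters
pixels = np.random.uniform(-1, 1, size=(32 * 32, 3))        # normalized pixels, flattened
d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = d.argmin(axis=1)  # one cluster index ("token") per pixel
print(input_ids.shape)        # (1024,)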
21
0
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
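# Brief sketch (added; assumes the dataclass above is the datasets-style download config
# as reconstructed): copy() deep-copies every field, so mutating the clone's mutable
# members cannot leak back into the original.
cfg = DownloadConfig(proxies={"https": "http://proxy:3128"}, max_retries=3)
clone = cfg.copy()
clone.proxies["https"] = "http://other:8080"
assert cfg.proxies["https"] == "http://proxy:3128"  # original untouched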
362
'''simple docstring'''
def solution() -> int:
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
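# A quick sanity check (added illustration): for small d, the d-th digit of Champernowne's
# constant can be read off a much shorter concatenation than the 10**6 digits built above.
digits = "".join(str(i) for i in range(1, 100))  # "123456789101112..."
assert digits[0] == "1"    # d = 1
assert digits[9] == "1"    # d = 10: first digit of "10"
assert digits[99] == "5"   # d = 100: first digit of "55"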
21
0
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
363
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
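# Hedged usage sketch (added; the concrete pipeline class and checkpoint name are
# assumptions consistent with the unconditional audio pipeline defined above, which
# follows the standard diffusers DiffusionPipeline pattern).
import torch
from diffusers import DanceDiffusionPipeline  # assumed concrete pipeline of this shape

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(batch_size=1, num_inference_steps=50, generator=torch.Generator().manual_seed(0))
audio = output.audios  # numpy array, clamped to [-1, 1] and cut to the requested length
print(audio.shape)     # (batch, channels, sample_count)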
21
0
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __magic_name__ ( _UpperCAmelCase): def __init__( self : List[str] , lowercase_ : Optional[int]=0.01 , lowercase_ : Any=1000 ): lowercase_ : List[Any] = p_stop lowercase_ : List[Any] = max_length def __iter__( self : Any ): lowercase_ : List[str] = 0 lowercase_ : Union[str, Any] = False while not stop and count < self.max_length: yield count count += 1 lowercase_ : Optional[Any] = random.random() < self.p_stop class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str]=False , lowercase_ : List[Any]=True ): lowercase_ : List[str] = [ BatchSamplerShard(lowercase_ , 2 , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) for i in range(2 ) ] lowercase_ : Dict = [list(lowercase_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowercase_ ) for shard in batch_sampler_shards] , [len(lowercase_ ) for e in expected] ) self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): # Check the shards when the dataset is a round multiple of total batch size. lowercase_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
lowercase_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is very small. lowercase_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # Check the shards when the dataset is a round multiple of batch size. lowercase_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowercase_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : str = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : int = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Tuple = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : List[str] = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): # Check the shards when the dataset is a round multiple of total batch size. lowercase_ : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. lowercase_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : List[str] = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowercase_ : Any = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): # Check the shards when the dataset is a round multiple of batch size. lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowercase_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowercase_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : str = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] lowercase_ : str = [BatchSamplerShard(lowercase_ , 2 , lowercase_ , even_batches=lowercase_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple=False , lowercase_ : Dict=2 , lowercase_ : Optional[int]=False ): random.seed(lowercase_ ) lowercase_ : List[str] = list(lowercase_ ) lowercase_ : Optional[Any] = [ IterableDatasetShard( lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ , num_processes=lowercase_ , process_index=lowercase_ , split_batches=lowercase_ , ) for i in range(lowercase_ ) ] lowercase_ : List[str] = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowercase_ ) iterable_dataset_lists.append(list(lowercase_ ) ) lowercase_ : Dict = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size lowercase_ : str = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) self.assertTrue(len(lowercase_ ) % shard_batch_size == 0 ) lowercase_ : int = [] for idx in range(0 , len(lowercase_ ) , lowercase_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowercase_ ) < len(lowercase_ ): reference += reference self.assertListEqual(lowercase_ , reference[: len(lowercase_ )] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Tuple = 42 lowercase_ : Dict = RandomIterableDataset() self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) # Edge case with a very small dataset lowercase_ : Tuple = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , 
lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowercase_ ) lowercase_ : int = SkipBatchSampler(lowercase_ , 2 ) self.assertListEqual(list(lowercase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 ) lowercase_ : int = skip_first_batches(lowercase_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): Accelerator() lowercase_ : Tuple = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
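# Small illustration (added; mirrors the assertions in the tests above):
# skip_first_batches resumes a dataloader mid-epoch by dropping the first N batches.
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

loader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(loader, num_batches=2)
print([batch.tolist() for batch in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]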
364
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
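# Standalone sketch (added) of the two text helpers the table builder above relies on,
# with the split regex copied from this file, to show what they do with a model name.
import re


def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def center_text(text, width):
    # the check/cross emoji render two columns wide, hence the special-cased length
    text_length = 2 if text in ("✅", "❌") else len(text)
    left_indent = (width - text_length) // 2
    return " " * left_indent + text + " " * (width - text_length - left_indent)


print(camel_case_split("SqueezeBertModel"))  # ['Squeeze', 'Bert', 'Model']
print(repr(center_text("✅", 8)))            # '   ✅   '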
21
0
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __magic_name__ ( _UpperCAmelCase): def __init__( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any]=13 , lowercase_ : List[str]=7 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=False , lowercase_ : List[str]=True , lowercase_ : int=99 , lowercase_ : Any=32 , lowercase_ : List[str]=5 , lowercase_ : List[str]=4 , lowercase_ : Optional[int]=64 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Any=3 , lowercase_ : List[Any]=4 , lowercase_ : List[str]=None , lowercase_ : str=2 , lowercase_ : Tuple=2 , lowercase_ : Optional[int]=2 , lowercase_ : Tuple=2 , lowercase_ : Optional[Any]=4 , lowercase_ : str=1 , ): lowercase_ : Union[str, Any] = parent lowercase_ : Optional[Any] = batch_size lowercase_ : List[str] = seq_length lowercase_ : Dict = is_training lowercase_ : Union[str, Any] = use_input_mask lowercase_ : Tuple = use_token_type_ids lowercase_ : Optional[int] = use_labels lowercase_ : List[Any] = vocab_size lowercase_ : Dict = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : int = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Optional[int] = max_position_embeddings lowercase_ : str = type_vocab_size lowercase_ : Optional[Any] = type_sequence_label_size lowercase_ : Optional[int] = initializer_range lowercase_ : Optional[int] = num_labels lowercase_ : int = num_choices lowercase_ : List[str] = scope lowercase_ : Optional[Any] = q_groups lowercase_ : Union[str, Any] = k_groups lowercase_ : Any = v_groups lowercase_ : Optional[int] = post_attention_groups lowercase_ : Union[str, Any] = intermediate_groups lowercase_ : Optional[Any] = output_groups def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : str = None if self.use_input_mask: lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Any = None lowercase_ : Optional[int] = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return SqueezeBertConfig( embedding_size=self.hidden_size , 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ): lowercase_ : int = SqueezeBertModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : int = model(lowercase_ , lowercase_ ) lowercase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] ): lowercase_ : Optional[Any] = SqueezeBertForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Dict = SqueezeBertForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Union[str, Any] = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : int ): lowercase_ : Optional[Any] = self.num_labels lowercase_ : Optional[Any] = SqueezeBertForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Union[str, Any] ): lowercase_ : List[Any] = self.num_labels lowercase_ : int = SqueezeBertForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Dict = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : int ): lowercase_ : Any = self.num_choices lowercase_ : Any = SqueezeBertForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) 
model.eval() lowercase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase_ : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : List[str] = self.prepare_config_and_inputs() (lowercase_) : Union[str, Any] = config_and_inputs lowercase_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase__ = ( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = SqueezeBertModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , dim=37 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : int = SqueezeBertModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_sentencepiece @require_tokenizers @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : List[str] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) lowercase_ : str = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) lowercase_ : List[Any] = model(lowercase_ )[0] lowercase_ : Tuple = torch.Size((1, 3) ) 
self.assertEqual(output.shape , lowercase_ ) lowercase_ : Dict = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] ) self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
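# Hedged inference sketch (added) mirroring the integration test above; the tokenizer
# step is an assumption, since the test feeds pre-tokenized ids directly.
import torch
from transformers import AutoTokenizer, SqueezeBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
inputs = tokenizer("A soccer game is happening.", "Some people are playing a sport.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): one logit per MNLI class
print(logits.argmax(-1))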
365
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
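# Usage sketch (added; assumes the context manager is named `hide` as reconstructed
# above): the cursor stays hidden for the block and is restored even on exceptions.
import time

with hide():
    time.sleep(1)  # cursor hidden while work runs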
21
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Optional[int] = logging.get_logger(__name__) _lowercase : Tuple = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''roberta''' def __init__( self : Dict , lowercase_ : Any=50265 , lowercase_ : Optional[Any]=768 , lowercase_ : List[str]=12 , lowercase_ : Any=12 , lowercase_ : Optional[int]=3072 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : List[str]=2 , lowercase_ : Any=0.02 , lowercase_ : Union[str, Any]=1E-12 , lowercase_ : Optional[Any]=1 , lowercase_ : Optional[int]=0 , lowercase_ : Tuple=2 , lowercase_ : Any="absolute" , lowercase_ : int=True , lowercase_ : Union[str, Any]=None , **lowercase_ : Tuple , ): super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) lowercase_ : Optional[Any] = vocab_size lowercase_ : Optional[Any] = hidden_size lowercase_ : List[Any] = num_hidden_layers lowercase_ : Any = num_attention_heads lowercase_ : Union[str, Any] = hidden_act lowercase_ : int = intermediate_size lowercase_ : List[str] = hidden_dropout_prob lowercase_ : int = attention_probs_dropout_prob lowercase_ : Optional[Any] = max_position_embeddings lowercase_ : Dict = type_vocab_size lowercase_ : str = initializer_range lowercase_ : int = layer_norm_eps lowercase_ : Union[str, Any] = position_embedding_type lowercase_ : Optional[int] = use_cache lowercase_ : Optional[int] = classifier_dropout class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): if self.task == "multiple-choice": lowercase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
366
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
21
0
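# A hedged usage sketch for the config class in the record above: the standard
# transformers pattern is config object -> randomly initialised model. The
# hyperparameter values here are arbitrary illustrations, not taken from the record.
from transformers import RobertaConfig, RobertaModel

config = RobertaConfig(num_hidden_layers=6, num_attention_heads=12)
model = RobertaModel(config)  # fresh weights with this architecture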
encode_dict = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    # Map each lowercase letter (and spaces) to its five-character A/B group.
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    # Split on spaces, then consume each word five characters at a time.
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
367
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating toward zero (C-style), not floor division
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
21
0
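# A quick sanity check for the two records above, assuming the helpers
# `encode`/`decode` (Baconian cipher) and `evaluate_postfix` are in scope
# as defined there.
assert decode(encode("hello world")) == "hello world"
assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20  # (2 + 3) * 4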
'''simple docstring''' from __future__ import annotations from scipy.special import comb # type: ignore class __magic_name__ : def __init__( self : Union[str, Any] , lowercase_ : list[tuple[float, float]] ): lowercase_ : int = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. lowercase_ : Dict = len(lowercase_ ) - 1 def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : float ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowercase_ : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , lowercase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(lowercase_ ) , 5 ) == 1 return output_values def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : float ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowercase_ : List[str] = self.basis_function(lowercase_ ) lowercase_ : Tuple = 0.0 lowercase_ : Dict = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : float = 0.01 ): from matplotlib import pyplot as plt # type: ignore lowercase_ : list[float] = [] # x coordinates of points to plot lowercase_ : list[float] = [] # y coordinates of points to plot lowercase_ : Tuple = 0.0 while t <= 1: lowercase_ : Any = self.bezier_curve_function(lowercase_ ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size lowercase_ : Any = [i[0] for i in self.list_of_points] lowercase_ : List[str] = [i[1] for i in self.list_of_points] plt.plot( lowercase_ , lowercase_ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , ) plt.scatter(lowercase_ , lowercase_ , color="""red""" , label="""Control Points""" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
368
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
21
0
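# A self-contained sketch of the static/dynamic shape idiom used by `shape_list`
# in the record above: keep statically known dimensions as Python ints and fall
# back to tf.shape() only where the graph leaves a dimension unknown (None).
# The function name is illustrative, not the library's own.
import tensorflow as tf


def shape_list_sketch(tensor: tf.Tensor) -> list:
    static = tensor.shape.as_list()  # Python ints, or None for unknown dims
    dynamic = tf.shape(tensor)  # always defined, but yields graph tensors
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]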
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
369
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    # Möbius function: (-1)^k for a square-free number with k prime factors, else 0.
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
21
0
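# Spot checks for the Möbius function in the record above, assuming `mobius`
# and the TheAlgorithms-style `maths` helpers it imports are available:
# mu(4) = 0 (square factor), mu(6) = +1 (two primes), mu(30) = -1 (three primes).
assert mobius(4) == 0
assert mobius(6) == 1
assert mobius(30) == -1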
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
370
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # since x, y, z are positive integers
                frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
21
0
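# A hedged usage sketch for the ControlNet pipelines re-exported by the
# __init__ above, via the usual diffusers entry point. The checkpoint ids are
# illustrative assumptions, not taken from the record.
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)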
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
371
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
21
0
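# A brute-force cross-check of the Pythagorean-triplet `solution` defined above,
# assuming it is in scope. For n = 12 the only triplet is (3, 4, 5), so the
# product is 60.
def brute_force(n: int) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


assert solution(12) == brute_force(12) == 60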
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
350
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
21
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowercase : Any = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> List[List[ImageInput]]: if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCAmelCase__ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = ['''pixel_values'''] def __init__( self : Optional[Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : List[str] , ): super().__init__(**lowercase_ ) lowercase_ : Optional[int] = size if size is not None else {"""shortest_edge""": 256} lowercase_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase_ : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowercase_ : List[str] = get_size_dict(lowercase_ , param_name="""crop_size""" ) lowercase_ : str = do_resize lowercase_ : Dict = size lowercase_ : Union[str, Any] = do_center_crop lowercase_ : int = crop_size lowercase_ : int = resample lowercase_ : int = do_rescale lowercase_ : str = rescale_factor lowercase_ : Tuple = offset lowercase_ : Any = do_normalize lowercase_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): lowercase_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" in size: lowercase_ : str = get_resize_output_image_size(lowercase_ , size["""shortest_edge"""] , default_to_square=lowercase_ ) elif "height" in size and "width" in size: lowercase_ : List[Any] = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ): lowercase_ : Optional[Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["""height"""], size["""width"""]) , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): lowercase_ : List[str] = image.astype(np.floataa ) if offset: lowercase_ : Optional[int] = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowercase_ : Dict = to_numpy_array(lowercase_ ) if do_resize: lowercase_ : Union[str, Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) if do_center_crop: lowercase_ : Union[str, Any] = self.center_crop(lowercase_ , size=lowercase_ ) if do_rescale: lowercase_ : Union[str, Any] = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_ ) if do_normalize: lowercase_ : Tuple = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) lowercase_ : str = to_channel_dimension_format(lowercase_ , lowercase_ ) return image def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Tuple , ): lowercase_ : int = do_resize if do_resize is not None else self.do_resize lowercase_ : Tuple = resample if resample is not None else self.resample lowercase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale lowercase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ : Tuple = offset if offset is not None else self.offset lowercase_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean lowercase_ : List[str] = image_std if image_std is not None else self.image_std lowercase_ : Dict = size if size is not None else self.size lowercase_ : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase_ : List[Any] = crop_size if crop_size is not None else self.crop_size lowercase_ : Optional[Any] = get_size_dict(lowercase_ , param_name="""crop_size""" ) if not valid_images(lowercase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowercase_ : Union[str, Any] = make_batched(lowercase_ ) lowercase_ : Any = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] lowercase_ : List[str] = {"""pixel_values""": videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
351
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = "PIL.Image.Image" UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] 
, mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
21
0
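# A hedged usage sketch for the `Image` feature in the record above: casting a
# column of file paths to Image() makes `datasets` decode them lazily into PIL
# images on access. The file name is an illustrative placeholder.
from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/example.png"]})
ds = ds.cast_column("image", Image())  # ds[0]["image"] then loads as a PIL.Image.Image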
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """decord""" ) self.check_model_type(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ): lowercase_ : Union[str, Any] = {} if frame_sampling_rate is not None: lowercase_ : Any = frame_sampling_rate if num_frames is not None: lowercase_ : Optional[Any] = num_frames lowercase_ : Union[str, Any] = {} if top_k is not None: lowercase_ : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ): if num_frames is None: lowercase_ : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content ) lowercase_ : Optional[Any] = VideoReader(lowercase_ ) videoreader.seek(0 ) lowercase_ : Tuple = 0 lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1 lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa ) lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy() lowercase_ : Union[str, Any] = list(lowercase_ ) lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ): lowercase_ : int = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase_ : str = model_outputs.logits.softmax(-1 )[0] lowercase_ : Optional[Any] = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
352
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
21
0
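# A hedged usage sketch for the video-classification pipeline in the record
# above; the checkpoint id and file path are illustrative assumptions, and
# decord must be installed for video decoding.
from transformers import pipeline

classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = classifier("path/to/video.mp4", top_k=5)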
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
353
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = DistilBertTokenizer UpperCamelCase__ = DistilBertTokenizerFast UpperCamelCase__ = True @slow def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" ) lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ ) lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ ) lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ ) lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
21
0
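# --- illustrative sketch (added commentary, not a dataset row) ----------------
# A de-obfuscated rendering of the FizzBuzz routine in the row above (the
# original collapses both parameters into one name), with a quick self-check.
# The name `fizz_buzz` is chosen here for clarity.
def fizz_buzz(number: int, iterations: int) -> str:
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out

assert fizz_buzz(1, 15).split() == [
    "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz",
    "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
]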
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : List[str]=13 , lowercase_ : str=30 , lowercase_ : List[str]=2 , lowercase_ : str=3 , lowercase_ : Dict=True , lowercase_ : int=True , lowercase_ : List[str]=32 , lowercase_ : List[str]=5 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=37 , lowercase_ : Dict="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Dict=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=0.6 , lowercase_ : Dict=None , ): lowercase_ : Any = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = image_size lowercase_ : Dict = patch_size lowercase_ : Dict = num_channels lowercase_ : Dict = is_training lowercase_ : Any = use_labels lowercase_ : Optional[int] = hidden_size lowercase_ : List[str] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : Dict = intermediate_size lowercase_ : Dict = hidden_act lowercase_ : int = hidden_dropout_prob lowercase_ : Dict = attention_probs_dropout_prob lowercase_ : Union[str, Any] = type_sequence_label_size lowercase_ : Union[str, Any] = initializer_range lowercase_ : Tuple = mask_ratio lowercase_ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase_ : str = (image_size // patch_size) ** 2 lowercase_ : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[Any] = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = ViTMAEModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Union[str, Any] = model(lowercase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Any = ViTMAEForPreTraining(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Any = model(lowercase_ ) lowercase_ : Union[str, Any] = (self.image_size // self.patch_size) ** 2 lowercase_ : Union[str, Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase_ : Dict = 1 lowercase_ : Union[str, Any] = ViTMAEForPreTraining(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Union[str, Any] = model(lowercase_ ) lowercase_ : Any = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : str = self.prepare_config_and_inputs() lowercase_ : List[str] = config_and_inputs lowercase_ : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () UpperCamelCase__ = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : List[Any] = ViTMAEModelTester(self ) lowercase_ : List[str] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Tuple = model_class(lowercase_ ) lowercase_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : List[Any] = [*signature.parameters.keys()] lowercase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Dict ): # make masks reproducible np.random.seed(2 ) lowercase_ : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) 
** 2 ) lowercase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase_ : Optional[int] = torch.from_numpy(lowercase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase_ : List[Any] = pt_noise super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase_ : Dict = outputs[0].cpu().numpy() lowercase_ : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = model_class.from_pretrained(lowercase_ ) model.to(lowercase_ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : Optional[int] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) # Make sure we don't have nans lowercase_ : Optional[int] = after_outputs[0].cpu().numpy() lowercase_ : Any = 0 lowercase_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : str ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : str ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""" ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): pass @slow def SCREAMING_SNAKE_CASE_ ( self : int ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = ViTMAEModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowerCamelCase ( ) -> List[str]: lowercase_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : int ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowercase_ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowercase_ ) lowercase_ : Dict = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : List[str] = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase_ : Union[str, Any] = ViTMAEConfig() lowercase_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase_ : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowercase_ : Union[str, Any] = model(**lowercase_ , noise=torch.from_numpy(lowercase_ ).to(device=lowercase_ ) ) # verify the logits lowercase_ : int = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase_ : int = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase_ ) , atol=1E-4 ) )
354
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available _lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
0
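# --- illustrative sketch (added commentary, not a dataset row) ----------------
# The ViTMAE tester in the row above derives its expected sequence length from
# the mask ratio: (num_patches + 1) positions (the +1 is the [CLS] token), of
# which a (1 - mask_ratio) fraction survives, rounded up. A standalone check of
# that arithmetic using the tester's default values:
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6  # the tester's defaults
num_patches = (image_size // patch_size) ** 2    # 15 ** 2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
print(num_patches, seq_length)  # 225 91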
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _lowercase : List[Any] = logging.get_logger(__name__) _lowercase : Optional[int] = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''blenderbot-small''' UpperCamelCase__ = ['''past_key_values'''] UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Optional[int] , lowercase_ : Any=50265 , lowercase_ : Tuple=512 , lowercase_ : Tuple=8 , lowercase_ : Optional[int]=2048 , lowercase_ : Optional[int]=16 , lowercase_ : Dict=8 , lowercase_ : str=2048 , lowercase_ : List[Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : int=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , lowercase_ : List[Any]="gelu" , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Any=1 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : int=2 , lowercase_ : Tuple=2 , **lowercase_ : List[Any] , ): lowercase_ : Optional[int] = vocab_size lowercase_ : Union[str, Any] = max_position_embeddings lowercase_ : List[Any] = d_model lowercase_ : Optional[int] = encoder_ffn_dim lowercase_ : List[Any] = encoder_layers lowercase_ : List[str] = encoder_attention_heads lowercase_ : List[Any] = decoder_ffn_dim lowercase_ : Optional[Any] = decoder_layers lowercase_ : List[str] = decoder_attention_heads lowercase_ : int = dropout lowercase_ : Optional[int] = attention_dropout lowercase_ : int = activation_dropout lowercase_ : int = activation_function lowercase_ : Any = init_std lowercase_ : str = encoder_layerdrop lowercase_ : List[str] = decoder_layerdrop lowercase_ : Optional[int] = use_cache lowercase_ : Any = encoder_layers lowercase_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class __magic_name__ ( _UpperCAmelCase): @property def SCREAMING_SNAKE_CASE_ ( self : int ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : List[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowercase_ : Any = {0: """batch"""} lowercase_ : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: lowercase_ : List[str] = {0: """batch""", 1: """decoder_sequence"""} lowercase_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowercase_ : List[str] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowercase_ : Optional[Any] = self.num_layers for i in range(lowercase_ ): lowercase_ : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} lowercase_ : Any = {0: """batch""", 2: """past_sequence + sequence"""} else: lowercase_ : str = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self : str ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : List[str] = super().outputs else: lowercase_ : str = super(lowercase_ , self ).outputs if self.use_past: lowercase_ : int = self.num_layers for i in range(lowercase_ ): lowercase_ : str = {0: """batch""", 2: """past_sequence + sequence"""} lowercase_ : str = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs lowercase_ : Dict = seq_length if not self.use_past else 1 lowercase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase_ : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowercase_ : Optional[int] = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ : Optional[int] = common_inputs["""input_ids"""].shape lowercase_ : str = common_inputs["""decoder_input_ids"""].shape[1] lowercase_ : Optional[int] = self.num_attention_heads lowercase_ : Optional[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase_ : Dict = decoder_seq_length + 3 lowercase_ : Tuple = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase_ : Optional[int] = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) lowercase_ : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase_ : Dict = self.num_layers lowercase_ : Any = min(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = max(lowercase_ , lowercase_ ) - min_num_layers lowercase_ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
lowercase_ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowercase_ : List[str] = seqlen + 2 lowercase_ : List[str] = self.num_layers lowercase_ : Optional[Any] = self.num_attention_heads lowercase_ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase_ : Optional[Any] = common_inputs["""attention_mask"""].dtype lowercase_ : List[Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) lowercase_ : List[Any] = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : Tuple = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase_ : Dict = tokenizer.num_special_tokens_to_add(lowercase_ ) lowercase_ : Optional[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence lowercase_ : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase_ : str = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) elif self.task == "causal-lm": lowercase_ : List[Any] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: lowercase_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : str ): if self.task in ["default", "seq2seq-lm"]: lowercase_ : Tuple = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) else: lowercase_ : List[Any] = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ )
355
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowercase : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
0
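# --- illustrative sketch (added commentary, not a dataset row) ----------------
# The OnnxConfig subclass in the row above describes each model input as an
# {axis_index: axis_name} mapping so the exporter knows which dimensions are
# dynamic. A minimal standalone version of the encoder-side mapping it builds:
from collections import OrderedDict

common_inputs = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "encoder_sequence"}),
        ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
    ]
)
# With past key/values enabled, the decoder sequence axis is renamed to
# "past_decoder_sequence + sequence" instead of a fixed-length name.
print(dict(common_inputs))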
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> str: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ ) lowercase_ : List[str] = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: lowercase_ : List[Any] = dataset_size < in_memory_max_size else: lowercase_ : Optional[int] = False lowercase_ : Any = is_small_dataset(UpperCAmelCase__ ) assert result == expected
356
'''simple docstring''' import os import numpy import onnx def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple: lowercase_ : Tuple = a.name lowercase_ : Tuple = b.name lowercase_ : Any = """""" lowercase_ : List[Any] = """""" lowercase_ : List[Any] = a == b lowercase_ : Union[str, Any] = name_a lowercase_ : Optional[Any] = name_b return res def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]: lowercase_ : int = list(model.graph.initializer ) lowercase_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowercase_ : Optional[Any] = inits[i].name lowercase_ : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]: lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ ) lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ ) lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase_ : List[Any] = list(model.graph.initializer ) lowercase_ : int = set() lowercase_ : int = {} lowercase_ : str = [] lowercase_ : int = 0 for i in range(len(UpperCAmelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase__ ) dup_set.add(UpperCAmelCase__ ) lowercase_ : Dict = inits[j].data_type lowercase_ : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCAmelCase__ ) total_reduced_size += mem_size lowercase_ : int = inits[i].name lowercase_ : List[str] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase__ ) else: lowercase_ : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) lowercase_ : Tuple = sorted(UpperCAmelCase__ ) _remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = """optimized_""" + model_file_name lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) onnx.save(UpperCAmelCase__ , UpperCAmelCase__ ) return new_model
21
0
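# --- illustrative sketch (added commentary, not a dataset row) ----------------
# The initializer-deduplication script in the row above estimates the bytes
# saved per duplicate tensor from ONNX dtype codes (1 = FLOAT, 6 = INT32,
# 7 = INT64, 11 = DOUBLE). A standalone version of that size estimate; the
# helper name `tensor_mem_size` is chosen here, not taken from the script.
import numpy

def tensor_mem_size(dims, dtype_code):
    elems = numpy.prod(dims)
    if dtype_code in (1, 6):   # 4-byte element types
        return elems * 4
    if dtype_code in (7, 11):  # 8-byte element types
        return elems * 8
    raise ValueError(f"unexpected data type: {dtype_code}")

print(tensor_mem_size((768, 768), 1) / 1024 / 1024)  # 2.25 MB for an fp32 matrix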
'''simple docstring''' def lowerCamelCase ( ) -> Dict: lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 1 while len(UpperCAmelCase__ ) < 1e6: constant.append(str(UpperCAmelCase__ ) ) i += 1 lowercase_ : int = """""".join(UpperCAmelCase__ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[99999] ) * int(constant[999999] ) ) if __name__ == "__main__": print(solution())
357
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
21
0
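# --- illustrative sketch (added commentary, not a dataset row) ----------------
# The Project Euler solution in the row above concatenates 1, 2, 3, ... into
# Champernowne's constant and multiplies the digits at positions 1, 10, ...,
# 1_000_000. The same idea with descriptive names in place of the obfuscated
# ones, checking digit count directly rather than the number of appended items:
digits = []
i = 1
while len(digits) < 10**6:
    digits.extend(str(i))
    i += 1
constant = "".join(digits)

product = 1
for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
    product *= int(constant[position - 1])
print(product)  # 210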
'''simple docstring''' import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __magic_name__ ( ctypes.Structure): # _fields is a specific attr expected by ctypes UpperCamelCase__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)] def lowerCamelCase ( ) -> List[Any]: if os.name == "nt": lowercase_ : List[Any] = CursorInfo() lowercase_ : int = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) lowercase_ : List[str] = False ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase ( ) -> str: if os.name == "nt": lowercase_ : int = CursorInfo() lowercase_ : Optional[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) lowercase_ : Optional[int] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase ( ) -> Any: try: hide_cursor() yield finally: show_cursor()
358
'''simple docstring''' class __magic_name__ : def __init__( self : int , lowercase_ : list ): lowercase_ : Dict = set_counts lowercase_ : List[Any] = max(lowercase_ ) lowercase_ : str = len(lowercase_ ) lowercase_ : str = [1] * num_sets lowercase_ : Dict = list(range(lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ): lowercase_ : List[Any] = self.get_parent(lowercase_ ) lowercase_ : Union[str, Any] = self.get_parent(lowercase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : List[str] = 0 lowercase_ : Optional[int] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : int = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : int = 0 lowercase_ : List[Any] = src_parent lowercase_ : List[Any] = self.set_counts[src_parent] lowercase_ : Tuple = max(self.max_set , lowercase_ ) return True def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int ): if self.parents[disj_set] == disj_set: return disj_set lowercase_ : int = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
21
0
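# --- illustrative sketch (added commentary, not a dataset row) ----------------
# The class in the row above is a union-by-rank disjoint set that also tracks
# the largest merged set's size. A bare-bones find/union with the same rank
# rule, using descriptive names chosen for this demo:
def find(parents, x):
    while parents[x] != x:      # walk up until the root points to itself
        x = parents[x]
    return x

def union(parents, ranks, a, b):
    ra, rb = find(parents, a), find(parents, b)
    if ra == rb:
        return False            # already in the same set
    if ranks[rb] >= ranks[ra]:  # attach the shallower tree under the deeper
        ra, rb = rb, ra
    parents[rb] = ra
    if ranks[ra] == ranks[rb]:
        ranks[ra] += 1
    return True

parents, ranks = list(range(4)), [0] * 4
union(parents, ranks, 0, 1); union(parents, ranks, 2, 3); union(parents, ranks, 0, 2)
print(find(parents, 3) == find(parents, 1))  # True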
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Dict=0 ): lowercase_ : Optional[Any] = np.random.RandomState(lowercase_ ) lowercase_ : Optional[int] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : int = self.get_dummy_inputs() lowercase_ : Dict = pipe(**lowercase_ ).images lowercase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : List[str] = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowercase_ : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : List[Any] = self.get_dummy_inputs() lowercase_ : Optional[Any] = pipe(**lowercase_ ).images lowercase_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : Dict = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowercase_ : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Tuple = self.get_dummy_inputs() lowercase_ : Optional[int] = pipe(**lowercase_ ).images lowercase_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : Any = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowercase_ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[int] = self.get_dummy_inputs() lowercase_ : str = pipe(**lowercase_ ).images lowercase_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : Any = np.array([0.5_37_55, 
0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowercase_ : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Tuple = self.get_dummy_inputs() lowercase_ : Optional[Any] = pipe(**lowercase_ ).images lowercase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : List[Any] = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowercase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[Any] = self.get_dummy_inputs() lowercase_ : Dict = pipe(**lowercase_ ).images lowercase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase_ : Union[str, Any] = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Tuple = self.get_dummy_inputs() lowercase_ : Optional[int] = 3 * [inputs["""prompt"""]] # forward lowercase_ : Optional[int] = pipe(**lowercase_ ) lowercase_ : List[Any] = output.images[0, -3:, -3:, -1] lowercase_ : Union[str, Any] = self.get_dummy_inputs() lowercase_ : str = 3 * [inputs.pop("""prompt""" )] lowercase_ : Optional[Any] = pipe.tokenizer( lowercase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="""np""" , ) lowercase_ : Optional[int] = text_inputs["""input_ids"""] lowercase_ : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowercase_ : int = prompt_embeds # forward lowercase_ : Union[str, Any] = pipe(**lowercase_ ) lowercase_ : List[Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Dict = self.get_dummy_inputs() lowercase_ : Dict = 3 * ["""this is a negative prompt"""] lowercase_ : Optional[int] = negative_prompt lowercase_ : str = 3 * [inputs["""prompt"""]] # forward lowercase_ : int = pipe(**lowercase_ ) lowercase_ : Union[str, Any] = output.images[0, -3:, -3:, -1] lowercase_ : Optional[int] = self.get_dummy_inputs() lowercase_ : Any = 3 * [inputs.pop("""prompt""" )] lowercase_ : Any = [] for p in [prompt, negative_prompt]: lowercase_ : str = pipe.tokenizer( lowercase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase_ , 
return_tensors="""np""" , ) lowercase_ : Optional[Any] = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowercase_ : Optional[Any] = embeds # forward lowercase_ : Union[str, Any] = pipe(**lowercase_ ) lowercase_ : Optional[int] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class __magic_name__ ( unittest.TestCase): @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Dict = ort.SessionOptions() lowercase_ : List[str] = False return options def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): # using the PNDM scheduler by default lowercase_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[Any] = """A painting of a squirrel eating a burger""" np.random.seed(0 ) lowercase_ : Optional[int] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) lowercase_ : List[Any] = output.images lowercase_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase_ : Tuple = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) lowercase_ : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : List[Any] = """open neural network exchange""" lowercase_ : Any = np.random.RandomState(0 ) lowercase_ : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="""np""" ) lowercase_ : int = output.images lowercase_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase_ : List[Any] = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) lowercase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Union[str, Any] = """open neural network exchange""" lowercase_ : Dict = np.random.RandomState(0 ) lowercase_ : Any = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="""np""" ) 
lowercase_ : Union[str, Any] = output.images lowercase_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase_ : Tuple = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Tuple = 0 def test_callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : np.ndarray ) -> None: lowercase_ : Optional[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowercase_ : Tuple = latents[0, -3:, -3:, -1] lowercase_ : str = np.array( [-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowercase_ : Optional[Any] = latents[0, -3:, -3:, -1] lowercase_ : str = np.array( [-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 lowercase_ : Dict = False lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Tuple = """Andromeda galaxy in a bottle""" lowercase_ : List[Any] = np.random.RandomState(0 ) pipe( prompt=lowercase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase_ , callback=lowercase_ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Any = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(lowercase_ , lowercase_ ) assert pipe.safety_checker is None lowercase_ : str = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) lowercase_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(lowercase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowercase_ : Optional[int] = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
359
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """decord""" ) self.check_model_type(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ): lowercase_ : Union[str, Any] = {} if frame_sampling_rate is not None: lowercase_ : Any = frame_sampling_rate if num_frames is not None: lowercase_ : Optional[Any] = num_frames lowercase_ : Union[str, Any] = {} if top_k is not None: lowercase_ : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ): if num_frames is None: lowercase_ : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content ) lowercase_ : Optional[Any] = VideoReader(lowercase_ ) videoreader.seek(0 ) lowercase_ : Tuple = 0 lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1 lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa ) lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy() lowercase_ : Union[str, Any] = list(lowercase_ ) lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ): lowercase_ : int = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase_ : str = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
21
0
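# --- illustrative sketch (added commentary, not a dataset row) ----------------
# Both copies of the video pipeline above post-process logits with a softmax
# followed by top-k; note that `probs.topk(k)` returns a (values, indices)
# pair, so it must be unpacked into two names, as the second copy does. A
# standalone PyTorch rendering with assumed demo logits and labels:
import torch

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
id2label = {0: "cat", 1: "dog", 2: "bird", 3: "fish"}

probs = logits.softmax(-1)[0]
scores, ids = probs.topk(2)  # unpack values and indices separately
print([{"score": s, "label": id2label[i]} for s, i in zip(scores.tolist(), ids.tolist())])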
def lowerCamelCase ( UpperCAmelCase__ : list ) -> list: if len(UpperCAmelCase__ ) < 2: return collection def circle_sort_util(UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: lowercase_ : str = False if low == high: return swapped lowercase_ : str = low lowercase_ : Optional[Any] = high while left < right: if collection[left] > collection[right]: lowercase_ : str = ( collection[right], collection[left], ) lowercase_ : Optional[int] = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowercase_ : List[Any] = ( collection[right + 1], collection[left], ) lowercase_ : Optional[Any] = True lowercase_ : Dict = low + int((high - low) / 2 ) lowercase_ : List[str] = circle_sort_util(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : Union[str, Any] = circle_sort_util(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) return swapped or left_swap or right_swap lowercase_ : List[Any] = True while is_not_sorted is True: lowercase_ : Tuple = circle_sort_util(UpperCAmelCase__ , 0 , len(UpperCAmelCase__ ) - 1 ) return collection if __name__ == "__main__": _lowercase : Dict = input("Enter numbers separated by a comma:\n").strip() _lowercase : int = [int(item) for item in user_input.split(",")] print(circle_sort(unsorted))
360
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = "▁" _lowercase : Dict = {"vocab_file": "spiece.model"} _lowercase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } _lowercase : Optional[int] = { "google/pegasus-xsum": 512, } _lowercase : Dict = logging.get_logger(__name__) class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , lowercase_ : int , lowercase_ : int="<pad>" , lowercase_ : Tuple="</s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : str="<mask_1>" , lowercase_ : str=None , lowercase_ : Optional[int]=103 , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[int] , ): lowercase_ : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(lowercase_ , lowercase_ ): raise TypeError( f'''additional_special_tokens should be of type {type(lowercase_ )}, but is''' f''' {type(lowercase_ )}''' ) lowercase_ : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowercase_ ) , self.offset - 1 ) ] if len(set(lowercase_ ) ) != len(lowercase_ ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) lowercase_ : str = additional_special_tokens_extended else: lowercase_ : int = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] lowercase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) lowercase_ : Any = mask_token_sent lowercase_ : Any = vocab_file lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) # add special tokens to encoder dict lowercase_ : Dict[int, str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) lowercase_ : Dict[str, int] = {v: k for k, v in self.encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): return len(self.sp_model ) + self.offset def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Any ): lowercase_ : str = self.__dict__.copy() lowercase_ : int = None return state def __setstate__( self : Optional[Any] , lowercase_ : int ): lowercase_ : int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase_ : Dict = {} lowercase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str ): return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowercase_ : str = self.sp_model.piece_to_id(lowercase_ ) return sp_id + self.offset def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowercase_ : List[str] = self.sp_model.IdToPiece(index - self.offset ) return token def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[str] ): lowercase_ : int = [] lowercase_ : int = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token lowercase_ : List[str] = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any]=False ): return 1 def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] ): lowercase_ : Union[str, Any] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids 
else 0 for x in seq] def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(lowercase_ ) elif token_ids_a is None: return self._special_token_mask(lowercase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Any=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ): if not os.path.isdir(lowercase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : Optional[Any] = os.path.join( lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , """wb""" ) as fi: lowercase_ : Dict = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( UpperCAmelCase__ : Any ) -> str: lowercase_ : List[Any] = filter(lambda UpperCAmelCase__ : p.requires_grad , model.parameters() ) lowercase_ : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowercase : Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Tuple: if metric == "rouge2": lowercase_ : List[str] = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": lowercase_ : List[str] = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": lowercase_ : int = """{val_avg_em:.4f}-{step_count}""" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""" ) lowercase_ : Dict = ModelCheckpoint( dirpath=UpperCAmelCase__ , filename=UpperCAmelCase__ , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Optional[Any]: return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=UpperCAmelCase__ , verbose=UpperCAmelCase__ , ) class __magic_name__ ( pl.Callback ): def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Any , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule , lowercase_ : str , lowercase_ : Any=True ): logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase_ : Union[str, Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results lowercase_ : Optional[int] = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase_ : Optional[Any] = od / """test_results.txt""" lowercase_ : str = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase_ : Any = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase_ : Tuple = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowercase_ ) generations_file.parent.mkdir(exist_ok=lowercase_ ) with open(lowercase_ , """a+""" ) as writer: for key in sorted(lowercase_ ): if key in ["log", "progress_bar", "preds"]: continue lowercase_ : List[str] = metrics[key] if isinstance(lowercase_ , torch.Tensor ): lowercase_ : List[str] = val.item() lowercase_ : int = f'''{key}: {val:.6f}\n''' writer.write(lowercase_ ) if not save_generations: return if "preds" in metrics: lowercase_ : int = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(lowercase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] ): try: lowercase_ : List[Any] = pl_module.model.model.num_parameters() except AttributeError: lowercase_ : Union[str, Any] = pl_module.model.num_parameters() lowercase_ : Union[str, Any] = count_trainable_parameters(lowercase_ ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowercase_ , lowercase_ , """test""" ) @rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : Optional[int] ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
'''simple docstring'''

def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
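# Added worked example (a sketch, not part of the original solution): after the
# join, constant[n - 1] is d(n), the n-th digit of the Champernowne constant
# 0.123456789101112..., so solution() computes d(1) * d(10) * ... * d(1000000).
_prefix = "".join(str(_i) for _i in range(1, 100))
assert _prefix[0] == "1"  # d(1) == 1
assert _prefix[9] == "1"  # d(10) == 1, the leading digit of 10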
'''simple docstring'''

def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # strike out every multiple of p, starting at p * p
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
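# Added usage check (a sketch, not in the original module): the sieve returns
# every prime up to and including num; starting the inner loop at p * p is safe
# because smaller multiples of p were already struck by smaller primes.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]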
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
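# Added sketch (not part of the original file): Euclid's formula with coprime
# m > n of opposite parity yields each primitive triple (m^2 - n^2, 2mn, m^2 + n^2)
# exactly once, with perimeter 2m(m + n); non-primitive triples are counted by
# stepping through multiples of that perimeter.
_m, _n = 2, 1
assert (_m * _m - _n * _n, 2 * _m * _n, _m * _m + _n * _n) == (3, 4, 5)
assert 2 * _m * (_m + _n) == 12  # perimeter of the (3, 4, 5) triple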
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
'''simple docstring'''

from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
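# Added usage check (a sketch, not in the original module): because the
# constructor re-sorts its input, merging two sorted linked lists produces a
# single ascending list over all elements.
assert str(merge_lists(SortedLinkedList([1, 3]), SortedLinkedList([2, 4]))) == "1 -> 2 -> 3 -> 4"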
'''simple docstring'''

import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
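# Added usage sketch (not part of the original module; render_spinner is a
# hypothetical placeholder for terminal UI work): hide() restores the cursor
# even if the wrapped block raises, since show_cursor() runs in the finally
# clause.
#
#     with hide():
#         render_spinner()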
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
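# Added usage sketch (not part of the original file; it assumes the public
# class names LevitConfig / LevitModel exported by the transformers library):
#
#     from transformers import LevitConfig, LevitModel
#
#     configuration = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
#     model = LevitModel(configuration)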
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
'''simple docstring'''

import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
'''simple docstring'''

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero, like C-style "/"
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
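# Added worked example (a sketch, not in the original file): "2 3 1 * + 9 -"
# evaluates as 2 + (3 * 1) - 9 = -4; operands pop in b, a order so that
# subtraction and division apply left-to-right.
assert evaluate_postfix(["2", "3", "1", "*", "+", "9", "-"]) == -4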
'''simple docstring'''

def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
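# Added check (a sketch, not in the original file): the string result matches
# Python's built-in bitwise OR rendered through bin().
assert binary_or(25, 32) == "0b111001"
assert binary_or(25, 32) == bin(25 | 32)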
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
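# Added usage sketch (not part of the original module): shape_list mixes static
# Python ints for known dimensions with dynamic scalar tensors for unknown ones,
# which is the usual way to write shape-dependent logic inside traced TF code.
#
#     import tensorflow as tf
#
#     x = tf.keras.Input(shape=(None, 128))  # batch and sequence dims unknown
#     dims = shape_list(x)                   # [dynamic, dynamic, 128]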
code:
'''simple docstring'''
from collections import deque


class __magic_name__ :

    def __init__( self : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
        lowercase_ : Optional[Any] = process_name  # process name
        lowercase_ : Any = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        lowercase_ : List[Any] = arrival_time
        lowercase_ : Any = burst_time  # remaining burst time
        lowercase_ : Tuple = 0  # total time of the process wait in ready queue
        lowercase_ : Any = 0  # time from arrival time to completion time


class __magic_name__ :

    def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : list[int] , lowercase_ : deque[Process] , lowercase_ : int , ):
        # total number of mlfq's queues
        lowercase_ : Tuple = number_of_queues
        # time slice of queues that round robin algorithm applied
        lowercase_ : List[str] = time_slices
        # unfinished process is in this ready_queue
        lowercase_ : Dict = queue
        # current time
        lowercase_ : Optional[Any] = current_time
        # finished process is in this sequence queue
        lowercase_ : deque[Process] = deque()

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : str = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence

    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : list[Process] ):
        lowercase_ : Tuple = []
        for i in range(len(lowercase_ ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : list[Process] ):
        lowercase_ : Union[str, Any] = []
        for i in range(len(lowercase_ ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : list[Process] ):
        lowercase_ : str = []
        for i in range(len(lowercase_ ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times

    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : deque[Process] ):
        return [q.burst_time for q in queue]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : deque[Process] ):
        lowercase_ : deque[Process] = deque()  # sequence deque of finished process
        while len(lowercase_ ) != 0:
            lowercase_ : Union[str, Any] = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(lowercase_ )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            lowercase_ : int = 0
            # set the process's turnaround time because it is finished
            lowercase_ : Optional[Any] = self.current_time - cp.arrival_time
            # set the completion time
            lowercase_ : Optional[Any] = self.current_time
            # add the process to queue that has finished queue
            finished.append(lowercase_ )
        self.finish_queue.extend(lowercase_ )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : deque[Process] , lowercase_ : int ):
        lowercase_ : deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(lowercase_ ) ):
            lowercase_ : List[str] = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(lowercase_ )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                lowercase_ : List[Any] = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(lowercase_ )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                lowercase_ : str = 0
                # set the finish time
                lowercase_ : List[str] = self.current_time
                # update the process' turnaround time because it is finished
                lowercase_ : Dict = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(lowercase_ )
        self.finish_queue.extend(lowercase_ )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            lowercase_ : Dict = self.round_robin( self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    _lowercase : Optional[int] = Process("P1", 0, 53)
    _lowercase : Any = Process("P2", 0, 17)
    _lowercase : Optional[Any] = Process("P3", 0, 68)
    _lowercase : Optional[Any] = Process("P4", 0, 24)
    _lowercase : Optional[Any] = 3
    _lowercase : List[str] = [17, 25]
    _lowercase : int = deque([Pa, Pa, Pa, Pa])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})

    _lowercase : Dict = Process("P1", 0, 53)
    _lowercase : List[str] = Process("P2", 0, 17)
    _lowercase : Any = Process("P3", 0, 68)
    _lowercase : List[Any] = Process("P4", 0, 24)
    _lowercase : str = 3
    _lowercase : List[str] = [17, 25]
    _lowercase : Optional[Any] = deque([Pa, Pa, Pa, Pa])
    _lowercase : Dict = MLFQ(number_of_queues, time_slices, queue, 0)
    _lowercase : Union[str, Any] = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
code_codestyle: 369
style_context:
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def lowerCamelCase ( UpperCAmelCase__ : int ) -> int:
    lowercase_ : Any = prime_factors(UpperCAmelCase__ )
    if is_square_free(UpperCAmelCase__ ):
        return -1 if len(UpperCAmelCase__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    _lowercase : List[str] = None

_lowercase : int = logging.get_logger(__name__)
_lowercase : List[str] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowercase : Dict = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}
_lowercase : Optional[Any] = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
_lowercase : Tuple = "▁"


class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = AlbertTokenizer

    def __init__( self : Any , lowercase_ : int=None , lowercase_ : str=None , lowercase_ : Optional[int]=True , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[Any]="[CLS]" , lowercase_ : Any="[SEP]" , lowercase_ : List[str]="<unk>" , lowercase_ : Dict="[SEP]" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : List[str]="[CLS]" , lowercase_ : str="[MASK]" , **lowercase_ : Optional[Any] , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        lowercase_ : Optional[int] = (
            AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
            if isinstance(lowercase_ , lowercase_ )
            else mask_token
        )
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
        lowercase_ : Union[str, Any] = do_lower_case
        lowercase_ : List[str] = remove_space
        lowercase_ : str = keep_accents
        lowercase_ : str = vocab_file
        lowercase_ : int = False if not self.vocab_file else True

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
        lowercase_ : List[str] = [self.sep_token_id]
        lowercase_ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
        lowercase_ : Tuple = [self.sep_token_id]
        lowercase_ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer."""
            )
        if not os.path.isdir(lowercase_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase_ : Any = os.path.join(
            lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
            copyfile(self.vocab_file , lowercase_ )
        return (out_vocab_file,)
code_codestyle: 370
style_context:
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int = 1000000 ) -> int:
    lowercase_ : List[Any] = limit + 1
    lowercase_ : Optional[Any] = [0] * limit
    for first_term in range(1 , UpperCAmelCase__ ):
        for n in range(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
            lowercase_ : List[Any] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a
    lowercase_ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
    return count


if __name__ == "__main__":
    print(f"""{solution() = }""")
style_context_codestyle: 21
label: 0
code:
import numpy as np
import qiskit


def lowerCamelCase ( UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int | None = None ) -> str:
    lowercase_ : Tuple = np.random.default_rng(seed=UpperCAmelCase__ )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    lowercase_ : Dict = 6 * key_len
    # Measurement basis for Alice's qubits.
    lowercase_ : int = rng.integers(2 , size=UpperCAmelCase__ )
    # The set of states Alice will prepare.
    lowercase_ : Dict = rng.integers(2 , size=UpperCAmelCase__ )
    # Measurement basis for Bob's qubits.
    lowercase_ : Tuple = rng.integers(2 , size=UpperCAmelCase__ )
    # Quantum Circuit to simulate BB84
    lowercase_ : int = qiskit.QuantumCircuit(UpperCAmelCase__ , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(UpperCAmelCase__ ):
        if alice_state[index] == 1:
            bbaa_circ.x(UpperCAmelCase__ )
        if alice_basis[index] == 1:
            bbaa_circ.h(UpperCAmelCase__ )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(UpperCAmelCase__ ):
        if bob_basis[index] == 1:
            bbaa_circ.h(UpperCAmelCase__ )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    lowercase_ : int = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    lowercase_ : List[str] = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1 , seed_simulator=UpperCAmelCase__ )
    # Returns the result of measurement.
    lowercase_ : Any = job.result().get_counts(UpperCAmelCase__ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    lowercase_ : Union[str, Any] = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    lowercase_ : Union[str, Any] = gen_key[:key_len] if len(UpperCAmelCase__ ) >= key_len else gen_key.ljust(UpperCAmelCase__ , """0""" )
    return key


if __name__ == "__main__":
    print(f"""The generated key is : {bbaa(8, seed=0)}""")

    from doctest import testmod

    testmod()
code_codestyle: 371
style_context:
'''simple docstring'''
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class __magic_name__ ( unittest.TestCase):

    @parameterized.expand([(None,), ("""foo.json""",)] )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
        lowercase_ : Union[str, Any] = GenerationConfig(
            do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(lowercase_ , config_name=lowercase_ )
            lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , lowercase_ )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
        lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
        lowercase_ : Optional[int] = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(lowercase_ , lowercase_ )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : Optional[int] = GenerationConfig()
        lowercase_ : int = {
            """max_new_tokens""": 1024,
            """foo""": """bar""",
        }
        lowercase_ : List[str] = copy.deepcopy(lowercase_ )
        lowercase_ : Tuple = generation_config.update(**lowercase_ )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(lowercase_ , lowercase_ )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(lowercase_ , {"""foo""": """bar"""} )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : Dict = GenerationConfig()
        lowercase_ : int = """bar"""
        with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
            generation_config.save_pretrained(lowercase_ )
            lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo , """bar""" )
        lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
        assert not hasattr(lowercase_ , """foo""" )  # no new kwargs should be initialized if from config

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : Optional[int] = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , lowercase_ )
        self.assertEqual(default_config.num_beams , 1 )
        lowercase_ : Dict = GenerationConfig(
            do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , lowercase_ )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(lowercase_ )
            lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , lowercase_ )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value


@is_staging_test
class __magic_name__ ( unittest.TestCase):

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls : Any ):
        lowercase_ : int = TOKEN
        HfFolder.save_token(lowercase_ )

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
        try:
            delete_repo(token=cls._token , repo_id="""test-generation-config""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
        except HTTPError:
            pass

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : Tuple = GenerationConfig(
            do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
        lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-generation-config""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
        lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : List[Any] = GenerationConfig(
            do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
        lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
        lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class __magic_name__ ( unittest.TestCase):

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : List[str] = """laion/clap-htsat-unfused"""
        lowercase_ : Dict = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Any ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : str ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : Union[str, Any] = self.get_tokenizer()
        lowercase_ : Optional[Any] = self.get_feature_extractor()
        lowercase_ : str = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        lowercase_ : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[int] = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 )
        lowercase_ : Optional[int] = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : Dict = self.get_feature_extractor()
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        lowercase_ : Optional[int] = floats_list((3, 1000) )
        lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" )
        lowercase_ : List[Any] = processor(audios=lowercase_ , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : List[str] = self.get_feature_extractor()
        lowercase_ : Optional[int] = self.get_tokenizer()
        lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        lowercase_ : Optional[Any] = """This is a test string"""
        lowercase_ : str = processor(text=lowercase_ )
        lowercase_ : int = tokenizer(lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : List[Any] = self.get_feature_extractor()
        lowercase_ : Dict = self.get_tokenizer()
        lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowercase_ : List[str] = processor.batch_decode(lowercase_ )
        lowercase_ : Tuple = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : Optional[int] = self.get_feature_extractor()
        lowercase_ : int = self.get_tokenizer()
        lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
        self.assertListEqual(
            processor.model_input_names[2:] ,
            feature_extractor.model_input_names ,
            msg="""`processor` and `feature_extractor` model input names do not match""" , )
code_codestyle: 350
style_context:
'''simple docstring'''
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
    # Initialise PyTorch model
    lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
    print(F'''Building PyTorch model from configuration: {config}''' )
    lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , UpperCAmelCase__ )


if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    _lowercase : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase__ = '''BlipImageProcessor'''
    UpperCamelCase__ = '''AutoTokenizer'''

    def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ):
        super().__init__(lowercase_ , lowercase_ )
        # add QFormer tokenizer
        lowercase_ : Optional[int] = qformer_tokenizer

    def __call__( self : List[Any] , lowercase_ : ImageInput = None , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ):
        if images is None and text is None:
            raise ValueError("""You have to specify at least images or text.""" )
        lowercase_ : List[str] = BatchFeature()
        if text is not None:
            lowercase_ : Optional[Any] = self.tokenizer(
                text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
            encoding.update(lowercase_ )
            lowercase_ : Tuple = self.qformer_tokenizer(
                text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
            lowercase_ : List[Any] = qformer_text_encoding.pop("""input_ids""" )
            lowercase_ : List[Any] = qformer_text_encoding.pop("""attention_mask""" )
        if images is not None:
            lowercase_ : str = self.image_processor(lowercase_ , return_tensors=lowercase_ )
            encoding.update(lowercase_ )
        return encoding

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , *lowercase_ : int , **lowercase_ : Any ):
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ):
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : Tuple = self.tokenizer.model_input_names
        lowercase_ : Optional[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , **lowercase_ : Optional[int] ):
        if os.path.isfile(lowercase_ ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(lowercase_ , exist_ok=lowercase_ )
        lowercase_ : List[Any] = os.path.join(lowercase_ , """qformer_tokenizer""" )
        self.qformer_tokenizer.save_pretrained(lowercase_ )
        return super().save_pretrained(lowercase_ , **lowercase_ )

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls : int , lowercase_ : Any , **lowercase_ : str ):
        lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ , subfolder="""qformer_tokenizer""" )
        lowercase_ : List[Any] = cls._get_arguments_from_pretrained(lowercase_ , **lowercase_ )
        args.append(lowercase_ )
        return cls(*lowercase_ )
code_codestyle: 351
style_context:
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_lowercase : Optional[List[str]] = None
_lowercase : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126
# minus "|i1" which values are not preserved correctly when saving and loading an image
_lowercase : Optional[int] = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class __magic_name__ :
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    # Automatically constructed
    UpperCamelCase__ = "PIL.Image.Image"
    UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
    UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)

    def __call__( self : Tuple ):
        return self.pa_type

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(lowercase_ , lowercase_ ):
            lowercase_ : int = np.array(lowercase_ )
        if isinstance(lowercase_ , lowercase_ ):
            return {"path": value, "bytes": None}
        elif isinstance(lowercase_ , lowercase_ ):
            return {"path": None, "bytes": value}
        elif isinstance(lowercase_ , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(lowercase_ )
        elif isinstance(lowercase_ , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(lowercase_ )
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ):
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            lowercase_ : Union[str, Any] = {}
        lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(lowercase_ ):
                    lowercase_ : int = PIL.Image.open(lowercase_ )
                else:
                    lowercase_ : str = path.split("""::""" )[-1]
                    try:
                        lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
                        lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ )
                    except ValueError:
                        lowercase_ : str = None
                    with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
                        lowercase_ : Dict = BytesIO(f.read() )
                    lowercase_ : Optional[Any] = PIL.Image.open(bytes_ )
        else:
            lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("""binary""" ),
                "path": Value("""string""" ),
            }
        )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
            lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                lowercase_ : Optional[int] = storage.field("""bytes""" )
            else:
                lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                lowercase_ : Dict = storage.field("""path""" )
            else:
                lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            lowercase_ : Optional[int] = pa.array(
                [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] ,
                type=pa.binary() , )
            lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Tuple = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(lowercase_ , self.pa_type )

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(lowercase_ : Optional[Any] ):
            with xopen(lowercase_ , """rb""" ) as f:
                lowercase_ : int = f.read()
            return bytes_

        lowercase_ : Optional[Any] = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] ,
            type=pa.binary() , )
        lowercase_ : Any = pa.array(
            [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,
            type=pa.string() , )
        lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(lowercase_ , self.pa_type )


def lowerCamelCase ( ) -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
    lowercase_ : Tuple = BytesIO()
    if image.format in list_image_compression_formats():
        lowercase_ : int = image.format
    else:
        lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(UpperCAmelCase__ , format=UpperCAmelCase__ )
    return buffer.getvalue()


def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
    if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}


def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    lowercase_ : List[Any] = array.dtype
    lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    lowercase_ : Dict = dtype.kind
    lowercase_ : List[Any] = dtype.itemsize
    lowercase_ : Any = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        lowercase_ : int = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        lowercase_ : str = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ )
            lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) )
    return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}


def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(UpperCAmelCase__ , np.ndarray ):
            lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ )
            return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
        elif isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ )
            return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
        else:
            return objs
    else:
        return objs
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
from math import factorial

_lowercase : List[str] = {str(d): factorial(d) for d in range(10)}


def lowerCamelCase ( UpperCAmelCase__ : int ) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(UpperCAmelCase__ ) )


def lowerCamelCase ( ) -> int:
    lowercase_ : Tuple = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , UpperCAmelCase__ ) if sum_of_digit_factorial(UpperCAmelCase__ ) == i )


if __name__ == "__main__":
    print(f"""{solution() = }""")
code_codestyle: 352
style_context:
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
    lowercase_ : List[Any] = x
    lowercase_ : Any = y
    for step in range(UpperCAmelCase__ ):  # noqa: B007
        lowercase_ : Dict = a * a - b * b + x
        lowercase_ : str = 2 * a * b + y
        lowercase_ : Optional[Any] = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )


def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image:
    lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
    lowercase_ : Tuple = img.load()
    # loop through the image-coordinates
    for image_x in range(UpperCAmelCase__ ):
        for image_y in range(UpperCAmelCase__ ):
            # determine the figure-coordinates based on the image-coordinates
            lowercase_ : Any = figure_width / image_width * image_height
            lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
            lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
            lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
            else:
                lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    _lowercase : List[str] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : int = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = '''longformer'''

    def __init__( self : Dict , lowercase_ : Union[List[int], int] = 512 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 0 , lowercase_ : int = 2 , lowercase_ : int = 30522 , lowercase_ : int = 768 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 3072 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 512 , lowercase_ : int = 2 , lowercase_ : float = 0.02 , lowercase_ : float = 1E-12 , lowercase_ : bool = False , **lowercase_ : List[str] , ):
        super().__init__(pad_token_id=lowercase_ , **lowercase_ )
        lowercase_ : List[Any] = attention_window
        lowercase_ : Dict = sep_token_id
        lowercase_ : str = bos_token_id
        lowercase_ : str = eos_token_id
        lowercase_ : Dict = vocab_size
        lowercase_ : Optional[int] = hidden_size
        lowercase_ : List[Any] = num_hidden_layers
        lowercase_ : Dict = num_attention_heads
        lowercase_ : Any = hidden_act
        lowercase_ : int = intermediate_size
        lowercase_ : Optional[int] = hidden_dropout_prob
        lowercase_ : str = attention_probs_dropout_prob
        lowercase_ : Optional[int] = max_position_embeddings
        lowercase_ : Dict = type_vocab_size
        lowercase_ : Tuple = initializer_range
        lowercase_ : Tuple = layer_norm_eps
        lowercase_ : Tuple = onnx_export


class __magic_name__ ( _UpperCAmelCase):

    def __init__( self : Union[str, Any] , lowercase_ : "PretrainedConfig" , lowercase_ : str = "default" , lowercase_ : "List[PatchingSpec]" = None ):
        super().__init__(lowercase_ , lowercase_ , lowercase_ )
        lowercase_ : Dict = True

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        if self.task == "multiple-choice":
            lowercase_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            lowercase_ : List[str] = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : int = super().outputs
        if self.task == "default":
            lowercase_ : Optional[int] = {0: """batch"""}
        return outputs

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        return 1E-4

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
        lowercase_ : str = super().generate_dummy_inputs(
            preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        lowercase_ : int = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        lowercase_ : List[Any] = 1
        return inputs
code_codestyle: 353
style_context:
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class __magic_name__ ( _UpperCAmelCase):
    UpperCamelCase__ = DistilBertTokenizer
    UpperCamelCase__ = DistilBertTokenizerFast
    UpperCamelCase__ = True

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
        lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )
        lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
class __magic_name__ :

    def __init__( self : Optional[Any] ):
        lowercase_ : int = """"""
        lowercase_ : str = """"""
        lowercase_ : Dict = []

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , lowercase_ : int ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.worda[n]:
                lowercase_ : int = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                lowercase_ : Union[str, Any] = self.__min_dist_top_down_dp(lowercase_ , n - 1 )
                lowercase_ : List[Any] = self.__min_dist_top_down_dp(m - 1 , lowercase_ )
                lowercase_ : List[str] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                lowercase_ : Optional[Any] = 1 + min(lowercase_ , lowercase_ , lowercase_ )
            return self.dp[m][n]

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str , lowercase_ : str ):
        lowercase_ : Any = worda
        lowercase_ : Union[str, Any] = worda
        lowercase_ : Optional[int] = [[-1 for _ in range(len(lowercase_ ) )] for _ in range(len(lowercase_ ) )]
        return self.__min_dist_top_down_dp(len(lowercase_ ) - 1 , len(lowercase_ ) - 1 )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : str ):
        lowercase_ : Any = worda
        lowercase_ : int = worda
        lowercase_ : str = len(lowercase_ )
        lowercase_ : int = len(lowercase_ )
        lowercase_ : Tuple = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    lowercase_ : Tuple = j
                elif j == 0:  # second string is empty
                    lowercase_ : str = i
                elif worda[i - 1] == worda[j - 1]:  # last characters are equal
                    lowercase_ : Any = self.dp[i - 1][j - 1]
                else:
                    lowercase_ : str = self.dp[i][j - 1]
                    lowercase_ : Union[str, Any] = self.dp[i - 1][j]
                    lowercase_ : List[Any] = self.dp[i - 1][j - 1]
                    lowercase_ : int = 1 + min(lowercase_ , lowercase_ , lowercase_ )
        return self.dp[m][n]


if __name__ == "__main__":
    _lowercase : int = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    _lowercase : List[str] = input("Enter the first string: ").strip()
    _lowercase : Union[str, Any] = input("Enter the second string: ").strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
code_codestyle: 354
style_context:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : str = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 21
label: 0
code:
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    UpperCamelCase__ = CycleDiffusionPipeline
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''})
    UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        torch.manual_seed(0 )
        lowercase_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        lowercase_ : Any = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
        torch.manual_seed(0 )
        lowercase_ : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowercase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        lowercase_ : int = CLIPTextModel(lowercase_ )
        lowercase_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowercase_ : Union[str, Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : int=0 ):
        lowercase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
        lowercase_ : int = image / 2 + 0.5
        if str(lowercase_ ).startswith("""mps""" ):
            lowercase_ : Dict = torch.manual_seed(lowercase_ )
        else:
            lowercase_ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
        lowercase_ : Optional[int] = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        lowercase_ : Tuple = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowercase_ : Any = self.get_dummy_components()
        lowercase_ : List[Any] = CycleDiffusionPipeline(**lowercase_ )
        lowercase_ : Any = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : Dict = self.get_dummy_inputs(lowercase_ )
        lowercase_ : Any = pipe(**lowercase_ )
        lowercase_ : Dict = output.images
        lowercase_ : Dict = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        lowercase_ : List[Any] = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : int = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(lowercase_ , """half""" ):
                lowercase_ : List[str] = module.half()
        lowercase_ : str = CycleDiffusionPipeline(**lowercase_ )
        lowercase_ : Optional[Any] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : str = self.get_dummy_inputs(lowercase_ )
        lowercase_ : List[Any] = pipe(**lowercase_ )
        lowercase_ : int = output.images
        lowercase_ : Union[str, Any] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        lowercase_ : List[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        return super().test_save_load_local()

    @unittest.skip("""non-deterministic pipeline""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        return super().test_save_load_optional_components()

    @skip_mps
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        lowercase_ : List[str] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        lowercase_ : int = init_image.resize((512, 512) )
        lowercase_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
        lowercase_ : Any = DDIMScheduler.from_pretrained(lowercase_ , subfolder="""scheduler""" )
        lowercase_ : List[Any] = CycleDiffusionPipeline.from_pretrained(
            lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision="""fp16""" )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing()
        lowercase_ : Tuple = """A black colored car"""
        lowercase_ : Optional[Any] = """A blue colored car"""
        lowercase_ : List[Any] = torch.manual_seed(0 )
        lowercase_ : Union[str, Any] = pipe(
            prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type="""np""" , )
        lowercase_ : List[str] = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : Any = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        lowercase_ : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        lowercase_ : str = init_image.resize((512, 512) )
        lowercase_ : Any = """CompVis/stable-diffusion-v1-4"""
        lowercase_ : int = DDIMScheduler.from_pretrained(lowercase_ , subfolder="""scheduler""" )
        lowercase_ : Any = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        pipe.enable_attention_slicing()
        lowercase_ : List[Any] = """A black colored car"""
        lowercase_ : List[Any] = """A blue colored car"""
        lowercase_ : Any = torch.manual_seed(0 )
        lowercase_ : Optional[Any] = pipe(
            prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type="""np""" , )
        lowercase_ : List[str] = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
355
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
0
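The slow test earlier in this record drives CycleDiffusionPipeline end to end. Below is the same call pattern as a standalone sketch, using only checkpoints and arguments that appear in the test; the prompt/source_prompt pairing is an assumption (the obfuscated test does not show which string feeds which argument, but the expected output is the blue car).

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

# Checkpoint, scheduler, and sampling arguments mirror the slow test above.
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
pipe.to("cuda")  # or "cpu", much slower

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

output = pipe(
    prompt="A blue colored car",          # target description (assumed pairing)
    source_prompt="A black colored car",  # description of the input image (assumed pairing)
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
)
image = output.images[0]  # (512, 512, 3) numpy array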
'''simple docstring'''
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
356
'''simple docstring'''
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their (always distinct) names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # Recurse into the subgraphs carried by control-flow nodes.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves the result as
    "optimized_<model_file_name>" next to the input file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)

    return new_model
21
0
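The deduplication module earlier in this record defines helpers but no driver. Below is a minimal hedged sketch of invoking it, assuming the reconstructed top-level name remove_dup_initializers and a hypothetical export path.

import onnx

# Hypothetical input path; the helper writes "optimized_<name>" next to the
# input file and returns the new path.
optimized_path = remove_dup_initializers("exports/model.onnx")
model = onnx.load(optimized_path)
print(len(model.graph.initializer), "initializers after deduplication")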
'''simple docstring''' import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = DDIMPipeline UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCamelCase__ = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): torch.manual_seed(0 ) lowercase_ : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) lowercase_ : Optional[Any] = DDIMScheduler() lowercase_ : Tuple = {"""unet""": unet, """scheduler""": scheduler} return components def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : List[Any]=0 ): if str(lowercase_ ).startswith("""mps""" ): lowercase_ : List[str] = torch.manual_seed(lowercase_ ) else: lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase_ : Dict = { """batch_size""": 1, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = """cpu""" lowercase_ : Union[str, Any] = self.get_dummy_components() lowercase_ : str = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase_ : List[str] = self.get_dummy_inputs(lowercase_ ) lowercase_ : str = pipe(**lowercase_ ).images lowercase_ : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) lowercase_ : str = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) lowercase_ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : int ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): super().test_save_load_local(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Any ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Optional[Any] = """google/ddpm-cifar10-32""" lowercase_ : Any = UNetaDModel.from_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = DDIMScheduler() lowercase_ : str = DDIMPipeline(unet=lowercase_ , scheduler=lowercase_ ) ddim.to(lowercase_ ) ddim.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Tuple = torch.manual_seed(0 ) lowercase_ : Dict = ddim(generator=lowercase_ , eta=0.0 , output_type="""numpy""" ).images lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase_ : str = 
np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = """google/ddpm-ema-bedroom-256""" lowercase_ : Any = UNetaDModel.from_pretrained(lowercase_ ) lowercase_ : int = DDIMScheduler.from_pretrained(lowercase_ ) lowercase_ : Any = DDIMPipeline(unet=lowercase_ , scheduler=lowercase_ ) ddpm.to(lowercase_ ) ddpm.set_progress_bar_config(disable=lowercase_ ) lowercase_ : Optional[Any] = torch.manual_seed(0 ) lowercase_ : Tuple = ddpm(generator=lowercase_ , output_type="""numpy""" ).images lowercase_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase_ : Tuple = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
357
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ): lowercase_ : Optional[Any] = {} lowercase_ : Tuple = {} if prompt is not None: lowercase_ : Tuple = prompt if generate_kwargs is not None: lowercase_ : List[str] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase_ : str = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ): lowercase_ : List[Any] = load_image(lowercase_ ) if prompt is not None: if not isinstance(lowercase_ , lowercase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase_ : List[Any] = self.model.config.model_type if model_type == "git": lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework ) lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ : str = None return model_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowercase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase_ : Any = None if generate_kwargs is None: lowercase_ : Optional[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ : Dict = model_inputs.pop(self.model.main_input_name ) lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ): lowercase_ : List[str] = [] for output_ids in model_outputs: lowercase_ : Union[str, Any] = { """generated_text""": self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_ ) return records
21
0
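A short usage sketch for the image-to-text pipeline class earlier in this record, via the standard transformers factory; the checkpoint name is illustrative (any vision-to-sequence model works).

from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")
results = captioner(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    max_new_tokens=20,
)
# `postprocess` above builds a list of {"generated_text": ...} records per image.
print(results)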
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
358
'''simple docstring'''
class UnionFind:
    def __init__(self, set_counts: list):
        # set_counts[i] is the number of items in set i; max_set tracks the
        # size of the largest set seen so far.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union by rank: attach the lower-ranked root under the higher-ranked
        one, move the set count to the surviving root, and update max_set.
        Returns False if src and dst are already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
21
0
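A small usage sketch for the UnionFind class earlier in this record. The class and get_parent names are reconstructed from the internal calls; `merge` is an assumed name for the obfuscated merge method.

uf = UnionFind([1, 1, 1, 1])  # four singleton sets
uf.merge(0, 1)                # union by rank: set sizes become [0, 2, 1, 1]
uf.merge(2, 3)
uf.merge(0, 2)                # merges the two pairs into one set of size 4
print(uf.max_set)             # 4
print(uf.get_parent(0) == uf.get_parent(3))  # True, with path compression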
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations: XOR sums the bits
    without carrying, and AND shifted left by one propagates the carries."""
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
359
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowercase : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase) class __magic_name__ ( _UpperCAmelCase): def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ): super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , """decord""" ) self.check_model_type(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ): lowercase_ : Union[str, Any] = {} if frame_sampling_rate is not None: lowercase_ : Any = frame_sampling_rate if num_frames is not None: lowercase_ : Optional[Any] = num_frames lowercase_ : Union[str, Any] = {} if top_k is not None: lowercase_ : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ): return super().__call__(lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ): if num_frames is None: lowercase_ : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content ) lowercase_ : Optional[Any] = VideoReader(lowercase_ ) videoreader.seek(0 ) lowercase_ : Tuple = 0 lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1 lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa ) lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy() lowercase_ : Union[str, Any] = list(lowercase_ ) lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ): lowercase_ : int = self.model(**lowercase_ ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ): if top_k > self.model.config.num_labels: lowercase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase_ : str = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Tuple = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
21
0
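A worked trace of the carry-propagation adder earlier in this record, for add(5, 3):

# first=0b101 (5), second=0b011 (3)
# iteration 1: carry=0b001, first=0b110 (6), second=0b010 (2)
# iteration 2: carry=0b010, first=0b100 (4), second=0b100 (4)
# iteration 3: carry=0b100, first=0b000 (0), second=0b1000 (8)
# iteration 4: carry=0b000, first=0b1000 (8), second=0 -> loop exits
assert add(5, 3) == 8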
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for the minimum vertex cover problem."""
    queue: list[list] = []

    # For each node and its adjacency list, push [rank, (node, neighbours)]
    # onto the queue. heapq implements a min priority queue, so -1 * len(v)
    # turns it into a max priority queue keyed on the node's degree.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # If v has no adjacent node, skip it
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
360
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, 
"""text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ 
) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
21
0
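A smaller worked example for greedy_min_vertex_cover (earlier in this record) than the one in its __main__ block: with a single edge, the first vertex popped covers it, and the loop stops as soon as the top rank reaches zero.

assert greedy_min_vertex_cover({0: [1], 1: [0]}) == {0}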
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
361
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ): lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18} lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Optional[Any] = num_channels lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Optional[int] = do_resize lowercase_ : Optional[Any] = size lowercase_ : Union[str, Any] = do_normalize def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """clusters""" ) ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowercase_ ) lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ 
).to_dict() lowercase_ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowercase_ ) lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowercase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE_ ( self : Any ): pass def lowerCamelCase ( ) -> Any: lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowercase_ : Any = Image.open(dataset[4]["""file"""] ) lowercase_ : Dict = Image.open(dataset[5]["""file"""] ) lowercase_ : int = [imagea, imagea] return images @require_vision @require_torch class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase_ : Optional[int] = prepare_images() # test non-batched lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ ) # test batched lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowercase_ : Union[str, Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
21
0
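A hedged sketch of the integration flow tested earlier in this record: unlike most image processors, ImageGPT's maps pixels to color-cluster token ids rather than float tensors (the image path is illustrative).

from PIL import Image
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
image = Image.open("cat.png").convert("RGB")  # any RGB image
encoding = processor(image, return_tensors="pt")
print(encoding.input_ids.shape)  # torch.Size([1, 1024]) -- a 32x32 grid of cluster ids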
'''simple docstring''' from sklearn.metrics import recall_score import datasets _lowercase : Any = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" _lowercase : Tuple = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" _lowercase : Union[str, Any] = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=1 , lowercase_ : Any="binary" , lowercase_ : Optional[Any]=None , lowercase_ : Any="warn" , ): lowercase_ : Union[str, Any] = recall_score( lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_ , zero_division=lowercase_ , ) return {"recall": float(lowercase_ ) if score.size == 1 else score}
362
'''simple docstring'''
def solution() -> int:
    """Product of the digits d_1, d_10, d_100, ..., d_1000000 of
    Champernowne's constant 0.123456789101112... (Project Euler 40)."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
21
0
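For the digit-product solution earlier in this record, the sampled digits of 0.123456789101112... can be checked by hand: d1=1, d10=1, d100=5, d1000=3, d10000=7, d100000=2, d1000000=1.

assert solution() == 1 * 1 * 5 * 3 * 7 * 2 * 1 == 210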
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
363
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ): super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): if audio_length_in_s is None: lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate lowercase_ : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) lowercase_ : List[Any] = int(lowercase_ ) if sample_size % down_scale_factor != 0: lowercase_ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' """ process.""" ) lowercase_ : Any = int(lowercase_ ) lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ ) # set step values self.scheduler.set_timesteps(lowercase_ , device=audio.device ) lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase_ )
21
0
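Quick checks for the greatest_common_divisor / lcm / solution helpers earlier in this record; solution(10) == 2520 is the worked example from the Project Euler 5 statement.

assert greatest_common_divisor(12, 18) == 6
assert lcm(4, 6) == 12
assert solution(10) == 2520
assert solution() == 232792560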
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase : str = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class __magic_name__ : UpperCamelCase__ = field( default='''cifar10''', metadata={'''help''': '''Name of a dataset from the datasets package'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''The column name of the images in the files.'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A folder containing the training data.'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A folder containing the validation data.'''}) UpperCamelCase__ = field( default=0.15, metadata={'''help''': '''Percent to split off of train for validation.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) }, ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : int = {} if self.train_dir is not None: lowercase_ : int = self.train_dir if self.validation_dir is not None: lowercase_ : Optional[int] = self.validation_dir lowercase_ : Optional[Any] = data_files if data_files else None @dataclass class __magic_name__ : UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''}) UpperCamelCase__ = field( default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Name or path of preprocessor config.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) }, ) UpperCamelCase__ = field( default=0.75, metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''}) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = field( default=1e-3, metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''}) def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]: lowercase_ : List[Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowerCamelCase ( ) -> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , UpperCAmelCase__ , UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ : str = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
lowercase_ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. lowercase_ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase_ : List[str] = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0: lowercase_ : int = ds["""train"""].train_test_split(data_args.train_val_split ) lowercase_ : Union[str, Any] = split["""train"""] lowercase_ : Tuple = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase_ : Any = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: lowercase_ : Any = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ : Dict = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowercase_ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ ) elif model_args.model_name_or_path: lowercase_ : str = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ ) else: lowercase_ : List[str] = ViTImageProcessor() # create model if model_args.model_name_or_path: lowercase_ : str = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowercase_ : Optional[int] = ViTMAEForPreTraining(UpperCAmelCase__ ) if training_args.do_train: lowercase_ : Union[str, Any] = ds["""train"""].column_names else: lowercase_ : str = ds["""validation"""].column_names if data_args.image_column_name is not None: 
lowercase_ : Optional[int] = data_args.image_column_name elif "image" in column_names: lowercase_ : Tuple = """image""" elif "img" in column_names: lowercase_ : int = """img""" else: lowercase_ : List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowercase_ : Union[str, Any] = image_processor.size["""shortest_edge"""] else: lowercase_ : str = (image_processor.size["""height"""], image_processor.size["""width"""]) lowercase_ : List[str] = Compose( [ Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(UpperCAmelCase__ : List[str] ): lowercase_ : int = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: lowercase_ : Optional[Any] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(UpperCAmelCase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: lowercase_ : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(UpperCAmelCase__ ) # Compute absolute learning rate lowercase_ : Union[str, Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowercase_ : Union[str, Any] = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer lowercase_ : int = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: lowercase_ : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ : List[Any] = last_checkpoint lowercase_ : str = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase_ : int = trainer.evaluate() trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) # Write model card and (optionally) push to hub lowercase_ : Union[str, Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase__ ) else: trainer.create_model_card(**UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
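# Example invocation (illustrative; the output path and hyperparameter values are
# placeholders, and each flag maps onto one of the dataclass fields defined above):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss \
#       --overwrite_output_dir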
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : Union[str, Any] = "src/transformers" _lowercase : str = "docs/source/en" _lowercase : Union[str, Any] = "." def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int: with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ : Union[str, Any] = f.readlines() # Find the start prompt. lowercase_ : Optional[Any] = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 lowercase_ : int = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. _lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. _lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any: lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ ) return [m.group(0 ) for m in matches] def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]: lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ ) lowercase_ : List[str] = (width - text_length) // 2 lowercase_ : Dict = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase ( ) -> Any: lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES lowercase_ : Any = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ ) lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = None if attr_name.endswith("""Tokenizer""" ): lowercase_ : Optional[int] = slow_tokenizers lowercase_ : Union[str, Any] = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): lowercase_ : Optional[Any] = fast_tokenizers lowercase_ : Dict = attr_name[:-13] elif _re_tf_models.match(UpperCAmelCase__ ) is not None: lowercase_ : str = tf_models lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0] elif _re_flax_models.match(UpperCAmelCase__ ) is not None: lowercase_ : List[str] = flax_models lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0] elif _re_pt_models.match(UpperCAmelCase__ ) is not None: lowercase_ : Tuple = pt_models lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): lowercase_ : int = True break # Try again after removing the last word in the name lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] ) # Let's build that table! lowercase_ : Dict = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns] lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" lowercase_ : int = {True: """✅""", False: """❌"""} for name in model_names: lowercase_ : str = model_name_to_prefix[name] lowercase_ : Any = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n" return table def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file( filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) lowercase_ : Dict = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _lowercase : Optional[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
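# Usage note (illustrative): run from the repository root. Without the flag the
# script only verifies the model table and raises a ValueError on a mismatch; with
# the flag it rewrites the table in docs/source/en/index.md in place:
#
#   python utils/check_table.py                      # verify only
#   python utils/check_table.py --fix_and_overwrite  # regenerate the table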
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yields primes without an upper bound."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its known factor forward to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: schedule its square as the first composite it will mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the square remainder 2 * n * p_n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as its remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
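# Quick check (illustrative): the sieve is an unbounded generator, so it can be
# sliced lazily instead of materialized.
from itertools import islice

assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]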
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():  # name reconstructed; the original identifier was mangled
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
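# Usage sketch (illustrative): the context manager restores the cursor even if
# the wrapped block raises, which is the safe way to use the two primitives above.
import time

with hidden_cursor():
    for _ in range(3):
        print(".", end="", flush=True)
        time.sleep(0.2)
print()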
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _lowercase : Union[str, Any] = logging.getLogger(__name__) @dataclass class __magic_name__ : UpperCamelCase__ = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) }, ) @dataclass class __magic_name__ : UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Evaluation language. 
Also train language if `train_language` is set to None.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Train language if it is different from the evaluation language.'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, ) UpperCamelCase__ = field( default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) }, ) UpperCamelCase__ = field( default=_UpperCAmelCase, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, ) def lowerCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase_ : Optional[int] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_xnli""" , UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ : Optional[int] = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase__ ) datasets.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.set_verbosity(UpperCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
lowercase_ : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: lowercase_ : int = load_dataset( """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: lowercase_ : int = load_dataset( """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase_ : Union[str, Any] = train_dataset.features["""label"""].names if training_args.do_eval: lowercase_ : str = load_dataset( """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase_ : int = eval_dataset.features["""label"""].names if training_args.do_predict: lowercase_ : List[str] = load_dataset( """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase_ : List[Any] = predict_dataset.features["""label"""].names # Labels lowercase_ : Dict = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase_ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel={str(UpperCAmelCase__ ): label for i, label in enumerate(UpperCAmelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase_ : Optional[int] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: lowercase_ : Dict = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase_ : Any = False def preprocess_function(UpperCAmelCase__ : Tuple ): # Tokenize the texts return tokenizer( examples["""premise"""] , examples["""hypothesis"""] , padding=UpperCAmelCase__ , max_length=data_args.max_seq_length , truncation=UpperCAmelCase__ , ) if training_args.do_train: if data_args.max_train_samples is not None: lowercase_ : Optional[int] = min(len(UpperCAmelCase__ ) , data_args.max_train_samples ) lowercase_ : Optional[Any] = train_dataset.select(range(UpperCAmelCase__ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): lowercase_ : Tuple = train_dataset.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , ) # Log a few random samples from the training set: for index in random.sample(range(len(UpperCAmelCase__ ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase_ : str = min(len(UpperCAmelCase__ ) , data_args.max_eval_samples ) lowercase_ : Tuple = eval_dataset.select(range(UpperCAmelCase__ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): lowercase_ : Dict = eval_dataset.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: lowercase_ : str = min(len(UpperCAmelCase__ ) , data_args.max_predict_samples ) lowercase_ : Optional[int] = predict_dataset.select(range(UpperCAmelCase__ ) ) with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ): lowercase_ : Dict = predict_dataset.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , ) # Get the metric function lowercase_ : Optional[Any] = evaluate.load("""xnli""" ) # You can define your 
custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(UpperCAmelCase__ : EvalPrediction ): lowercase_ : int = p.predictions[0] if isinstance(p.predictions , UpperCAmelCase__ ) else p.predictions lowercase_ : Union[str, Any] = np.argmax(UpperCAmelCase__ , axis=1 ) return metric.compute(predictions=UpperCAmelCase__ , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: lowercase_ : List[str] = default_data_collator elif training_args.fpaa: lowercase_ : Tuple = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) else: lowercase_ : Optional[int] = None # Initialize our Trainer lowercase_ : int = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: lowercase_ : int = None if training_args.resume_from_checkpoint is not None: lowercase_ : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ : List[str] = last_checkpoint lowercase_ : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase__ ) lowercase_ : Tuple = train_result.metrics lowercase_ : Union[str, Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase__ ) ) lowercase_ : str = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , UpperCAmelCase__ ) trainer.save_metrics("""train""" , UpperCAmelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase_ : List[Any] = trainer.evaluate(eval_dataset=UpperCAmelCase__ ) lowercase_ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase__ ) lowercase_ : List[str] = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) trainer.log_metrics("""eval""" , UpperCAmelCase__ ) trainer.save_metrics("""eval""" , UpperCAmelCase__ ) # Prediction if training_args.do_predict: logger.info("""*** Predict ***""" ) lowercase_ : str = trainer.predict(UpperCAmelCase__ , metric_key_prefix="""predict""" ) lowercase_ : Optional[int] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCAmelCase__ ) ) lowercase_ : Optional[Any] = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) ) trainer.log_metrics("""predict""" , UpperCAmelCase__ ) trainer.save_metrics("""predict""" , UpperCAmelCase__ ) lowercase_ : List[str] = np.argmax(UpperCAmelCase__ , axis=1 ) lowercase_ : List[Any] = os.path.join(training_args.output_dir , """predictions.txt""" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , """w""" ) as writer: writer.write("""index\tprediction\n""" ) for index, item in enumerate(UpperCAmelCase__ ): lowercase_ : List[Any] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
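# Example invocation (illustrative; the model id and output path are placeholders,
# and each flag maps onto one of the dataclass fields defined above):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --output_dir ./xnli-de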
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowercase : int = logging.get_logger(__name__) @dataclass class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **lowercase_ : int ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Optional[int] = deprecated_arg[3:] setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript ) lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**lowercase_ ) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''}) UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''}) UpperCamelCase__ = field( default='''O1''', metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) }, ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowercase_ : Optional[Any] = torch.device("""cpu""" ) lowercase_ : Tuple = 0 elif is_torch_tpu_available(): lowercase_ : Optional[int] = xm.xla_device() lowercase_ : str = 0 else: lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return is_torch_tpu_available() and self.tpu @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def SCREAMING_SNAKE_CASE_ ( self : int ): requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def SCREAMING_SNAKE_CASE_ ( self : int ): return self.n_gpu > 0
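# Usage sketch (illustrative): the dataclass above corresponds to transformers'
# PyTorchBenchmarkArguments; the model id, batch sizes, and sequence lengths below
# are placeholders.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
benchmark = PyTorchBenchmark(args)
results = benchmark.run()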
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
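# Example invocation (illustrative): fire.Fire exposes the function signature on
# the command line. The script filename, tokenizer name, and data directory are
# placeholders.
#
#   python save_len_file.py t5-small /path/to/data --max_source_length 512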
'''simple docstring'''
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an expression in postfix (reverse Polish) notation.

    Division is floor division toward negative infinity, adjusted so that
    mixed-sign operands round toward zero, matching truncating integer division.
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
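# Worked example (illustrative): "2 1 + 3 *" is the postfix form of (2 + 1) * 3.
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9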
'''simple docstring''' from sklearn.metrics import fa_score import datasets _lowercase : Any = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _lowercase : Optional[Any] = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _lowercase : List[Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : str=None , lowercase_ : Any=1 , lowercase_ : str="binary" , lowercase_ : Union[str, Any]=None ): lowercase_ : Optional[int] = fa_score( lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_ ) return {"f1": float(lowercase_ ) if score.size == 1 else score}
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase__ , np.ndarray ): return list(tensor.shape ) lowercase_ : Tuple = tf.shape(UpperCAmelCase__ ) if tensor.shape == tf.TensorShape(UpperCAmelCase__ ): return dynamic lowercase_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )] def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase_ : List[Any] = [1] * inputs.shape.rank lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis] lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) # Compute layer normalization using the batch_normalization # function. lowercase_ : str = tf.nn.batch_normalization( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , ) return outputs def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase__ , tf.Tensor ): lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: lowercase_ : int = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) lowercase_ : Any = np.asarray(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ): lowercase_ : Union[str, Any] = chunk_data else: lowercase_ : Any = data def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str: if name in group.attrs: lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]] else: lowercase_ : int = [] lowercase_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any: def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ): if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
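# Quick check (illustrative): assuming the helpers above keep their upstream
# transformers names (shape_list, flatten), flattening dims 1..2 of a (2, 3, 4, 5)
# tensor mimics torch.flatten and yields shape (2, 12, 5):
#
#   x = tf.zeros((2, 3, 4, 5))
#   assert shape_list(flatten(x, start_dim=1, end_dim=2)) == [2, 12, 5]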
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowercase : Optional[List[str]] = None _lowercase : str = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowercase : Optional[int] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class __magic_name__ : UpperCamelCase__ = True UpperCamelCase__ = None # Automatically constructed UpperCamelCase__ = '''PIL.Image.Image''' UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()}) UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase) def __call__( self : Tuple ): return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowercase_ , lowercase_ ): lowercase_ : int = np.array(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return {"path": value, "bytes": None} elif isinstance(lowercase_ , lowercase_ ): return {"path": None, "bytes": value} elif isinstance(lowercase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowercase_ ) elif isinstance(lowercase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowercase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: lowercase_ : Union[str, Any] = {} lowercase_ : List[Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowercase_ ): lowercase_ : int = PIL.Image.open(lowercase_ ) else: lowercase_ : str = path.split("""::""" )[-1] try: lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""] lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ ) except ValueError: lowercase_ : str = None with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f: lowercase_ : Dict = BytesIO(f.read() ) lowercase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowercase_ : Optional[int] = storage.field("""bytes""" ) else: lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowercase_ : Dict = storage.field("""path""" ) else: lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase_ : Optional[int] = pa.array( [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() ) lowercase_ : Tuple = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(lowercase_ : Optional[Any] ): with xopen(lowercase_ , """rb""" ) as f: lowercase_ : int = f.read() return bytes_ lowercase_ : Optional[Any] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase_ : Any = pa.array( [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , 
mask=bytes_array.is_null() ) return array_cast(lowercase_ , self.pa_type ) def lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes: lowercase_ : Tuple = BytesIO() if image.format in list_image_compression_formats(): lowercase_ : int = image.format else: lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(UpperCAmelCase__ , format=UpperCAmelCase__ ) return buffer.getvalue() def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict: if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) lowercase_ : List[Any] = array.dtype lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER lowercase_ : Dict = dtype.kind lowercase_ : List[Any] = dtype.itemsize lowercase_ : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase_ : int = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase_ : str = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ ) lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )} def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCAmelCase__ , np.ndarray ): lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ ) return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs] else: return objs else: return objs
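For orientation, here is a minimal sketch of the numpy-to-PNG round-trip that encode_np_array and image_to_bytes above perform, assuming Pillow and numpy are installed; the helper names are illustrative, not the library's internal API.

from io import BytesIO

import numpy as np
import PIL.Image


def encode_array_as_png(arr: np.ndarray) -> bytes:
    # Pillow prefers uint8 for multi-channel data, mirroring the downcast above
    image = PIL.Image.fromarray(arr.astype("|u1"))
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return buffer.getvalue()


def decode_png_bytes(data: bytes) -> "PIL.Image.Image":
    image = PIL.Image.open(BytesIO(data))
    image.load()  # fully read the buffer, avoiding "Too many open files" errors
    return image


rgb = np.zeros((4, 4, 3), dtype=np.uint8)  # a tiny black RGB image
png_bytes = encode_array_as_png(rgb)
assert np.array_equal(np.array(decode_png_bytes(png_bytes)), rgb)  # PNG is lossless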
369
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    # Moebius function: (-1)**k if n is square-free with k prime factors, else 0
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
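As a cross-check, a self-contained trial-division version of the same Möbius computation (for illustration only; the module above delegates factorization to its maths helpers):

def mobius_small(n: int) -> int:
    k = 0  # number of distinct prime factors found so far
    p = 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            k += 1
            if n % p == 0:  # squared prime factor => mu(n) = 0
                return 0
        else:
            p += 1
    if n > 1:  # remaining n is itself prime
        k += 1
    return -1 if k % 2 else 1


assert [mobius_small(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]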
21
0
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Any = logging.get_logger(__name__) _lowercase : Optional[int] = { "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json", "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json", } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''encodec''' def __init__( self : Optional[int] , lowercase_ : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase_ : Tuple=24000 , lowercase_ : str=1 , lowercase_ : Optional[Any]=False , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , lowercase_ : str=128 , lowercase_ : Tuple=32 , lowercase_ : Dict=1 , lowercase_ : Optional[Any]=[8, 5, 4, 2] , lowercase_ : Optional[int]="weight_norm" , lowercase_ : Tuple=7 , lowercase_ : Union[str, Any]=7 , lowercase_ : Dict=3 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=True , lowercase_ : List[Any]="reflect" , lowercase_ : str=2 , lowercase_ : Any=2 , lowercase_ : Tuple=1.0 , lowercase_ : Dict=1024 , lowercase_ : List[Any]=None , lowercase_ : Dict=True , **lowercase_ : str , ): lowercase_ : Union[str, Any] = target_bandwidths lowercase_ : Optional[int] = sampling_rate lowercase_ : Union[str, Any] = audio_channels lowercase_ : str = normalize lowercase_ : Dict = chunk_length_s lowercase_ : Optional[int] = overlap lowercase_ : Any = hidden_size lowercase_ : List[Any] = num_filters lowercase_ : Tuple = num_residual_layers lowercase_ : List[Any] = upsampling_ratios lowercase_ : List[Any] = norm_type lowercase_ : List[str] = kernel_size lowercase_ : Tuple = last_kernel_size lowercase_ : Optional[Any] = residual_kernel_size lowercase_ : Any = dilation_growth_rate lowercase_ : Optional[int] = use_causal_conv lowercase_ : Optional[int] = pad_mode lowercase_ : str = compress lowercase_ : Any = num_lstm_layers lowercase_ : List[str] = trim_right_ratio lowercase_ : Optional[int] = codebook_size lowercase_ : Optional[int] = codebook_dim if codebook_dim is not None else hidden_size lowercase_ : List[Any] = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**lowercase_ ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Union[str, Any] = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
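A quick sanity check of the derived properties above, using the 24 kHz defaults (values assumed from this config's defaults, not measured):

import math

upsampling_ratios = [8, 5, 4, 2]
sampling_rate = 24000

hop_length = math.prod(upsampling_ratios)           # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
print(hop_length, frame_rate)  # 320 75

# num_quantizers for the largest bandwidth (24 kbps at 10 bits per codebook):
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]
print(int(1000 * target_bandwidths[-1] // (frame_rate * 10)))  # 32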
370
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if first_term > common_difference and first_term < 4 * common_difference:
                # x, y, z positive integers: z > 0 forces a > d, and n > 0 forces a < 4d
                frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
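The solution rests on writing the progression as x = a + d, y = a, z = a - d, so that n = x^2 - y^2 - z^2 = a(4d - a). A brute-force cross-check of that identity for small n (illustration only; slow for large limits):

def count_solutions(n: int, limit: int = 2000) -> int:
    count = 0
    for a in range(1, limit):  # a is the middle term y
        if n % a:
            continue
        four_d = a + n // a  # rearranged from n = a * (4d - a)
        if four_d % 4 == 0 and 0 < four_d // 4 < a:
            count += 1
    return count


assert count_solutions(27) == 2     # 34**2 - 27**2 - 20**2 == 27 and 12**2 - 9**2 - 6**2 == 27
assert count_solutions(1155) == 10  # stated in Project Euler problem 135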
21
0
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
371
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __magic_name__ ( unittest.TestCase): @parameterized.expand([(None,), ("""foo.json""",)] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ): lowercase_ : Union[str, Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" ) lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Optional[int] = GenerationConfig() lowercase_ : int = { """max_new_tokens""": 1024, """foo""": """bar""", } lowercase_ : List[str] = copy.deepcopy(lowercase_ ) lowercase_ : Tuple = generation_config.update(**lowercase_ ) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {"""foo""": """bar"""} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Dict = GenerationConfig() lowercase_ : int = """bar""" with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir: generation_config.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , """bar""" ) lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ ) assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , lowercase_ ) self.assertEqual(default_config.num_beams , 1 ) lowercase_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , lowercase_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ ) lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , lowercase_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __magic_name__ ( unittest.TestCase): @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any ): lowercase_ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-generation-config""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""test-generation-config""" , use_auth_token=self._token ) lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-generation-config""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token ) lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
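For reference, the save/load round-trip these tests exercise looks like this in ordinary usage (transformers' public GenerationConfig API):

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, top_k=50)
config.save_pretrained("./my-config")

loaded = GenerationConfig.from_pretrained("./my-config")
assert loaded.temperature == 0.7

# Keyword overrides passed at load time win over the stored file:
hot = GenerationConfig.from_pretrained("./my-config", temperature=1.0)
assert hot.temperature == 1.0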
21
0
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def A(__a: Optional[Any] , __a: int , __a: List[str] , __a: int=5 ): # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count("<mask>" ) == 1 lowerCAmelCase_ = torch.tensor(tokenizer.encode(__a , add_special_tokens=__a ) ).unsqueeze(0 ) # Batch size 1 lowerCAmelCase_ = model(__a )[0] # The last hidden-state is the first element of the output tuple lowerCAmelCase_ = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() lowerCAmelCase_ = logits[0, masked_index, :] lowerCAmelCase_ = logits.softmax(dim=0 ) lowerCAmelCase_ , lowerCAmelCase_ = prob.topk(k=__a , dim=0 ) lowerCAmelCase_ = " ".join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__a ) )] ) lowerCAmelCase_ = tokenizer.mask_token lowerCAmelCase_ = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ): lowerCAmelCase_ = predicted_token_bpe.replace("\u2581" , " " ) if " {0}".format(__a ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(" {0}".format(__a ) , __a ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(__a , __a ), values[index].item(), predicted_token, ) ) return topk_filled_outputs lowerCamelCase__ = CamembertTokenizer.from_pretrained('''camembert-base''') lowerCamelCase__ = CamembertForMaskedLM.from_pretrained('''camembert-base''') model.eval() lowerCamelCase__ = '''Le camembert est <mask> :)''' print(fill_mask(masked_input, model, tokenizer, topk=3))
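The same top-k mask filling is also available through the pipeline API, which is usually simpler than the manual softmax/topk above (real transformers API; downloading the camembert-base weights is assumed):

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="camembert-base")
for candidate in fill_mask("Le camembert est <mask> :)", top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))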
22
def pancake_sort(arr):
    """Sort arr by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip it to the front, then flip the whole prefix to park it at position cur - 1
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
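A short worked trace of the sort above (usage sketch; assumes the pancake_sort defined above):

# [3, 1, 2]:
#   cur=3: max at index 0 -> flip prefix of 1 (no-op), flip first 3 -> [2, 1, 3]
#   cur=2: max of [2, 1] at index 0 -> flip 1 (no-op), flip first 2 -> [1, 2, 3]
#   cur=1: done
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []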
22
1
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = CanineTokenizer lowerCamelCase__ = False def __a ( self ) -> Optional[int]: super().setUp() lowerCAmelCase_ = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> int: return CanineTokenizer.from_pretrained("google/canine-s" ) def __a ( self , **_a ) -> CanineTokenizer: lowerCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **_a ) lowerCAmelCase_ = 1024 return tokenizer @require_torch def __a ( self ) -> List[str]: lowerCAmelCase_ = self.canine_tokenizer lowerCAmelCase_ = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off lowerCAmelCase_ = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] # fmt: on lowerCAmelCase_ = tokenizer(_a , padding=_a , return_tensors="pt" ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_a , _a ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def __a ( self ) -> str: lowerCAmelCase_ = self.canine_tokenizer lowerCAmelCase_ = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] lowerCAmelCase_ = tokenizer(_a , padding=_a , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , _a ) self.assertIn("attention_mask" , _a ) self.assertIn("token_type_ids" , _a ) @require_torch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.canine_tokenizer lowerCAmelCase_ = [ "What's the weater?", "It's about 25 degrees.", ] lowerCAmelCase_ = tokenizer( text_target=_a , max_length=32 , padding="max_length" , truncation=_a , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def __a ( self ) -> Dict: # safety check on max_len default value so we are sure the test works lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = " He is very happy, UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) tokenizer.save_pretrained(_a ) lowerCAmelCase_ = tokenizer.__class__.from_pretrained(_a ) lowerCAmelCase_ = after_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) shutil.rmtree(_a ) lowerCAmelCase_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = " He is very happy, UNwant\u00E9d,running" lowerCAmelCase_ = 
tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: lowerCAmelCase_ = chr(0xe007 ) additional_special_tokens.append(_a ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) tokenizer.save_pretrained(_a ) lowerCAmelCase_ = tokenizer.__class__.from_pretrained(_a ) lowerCAmelCase_ = after_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) self.assertIn(_a , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowerCAmelCase_ = tokenizer.__class__.from_pretrained(_a , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_a ) def __a ( self ) -> int: lowerCAmelCase_ = self.get_tokenizers(do_lower_case=_a ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCAmelCase_ , lowerCAmelCase_ = self.get_clean_sequence(_a ) # a special token for Canine can be defined as follows: lowerCAmelCase_ = 0xe005 lowerCAmelCase_ = chr(_a ) tokenizer.add_special_tokens({"cls_token": special_token} ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) self.assertEqual(len(_a ) , 1 ) lowerCAmelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) self.assertEqual(_a , input_encoded + special_token_id ) lowerCAmelCase_ = tokenizer.decode(_a , skip_special_tokens=_a ) self.assertTrue(special_token not in decoded ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.get_tokenizers(do_lower_case=_a ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCAmelCase_ = chr(0xe005 ) lowerCAmelCase_ = chr(0xe006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_a ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = tokenizer.tokenize(_a ) self.assertEqual(len(_a ) , 1 ) self.assertEqual(len(_a ) , 1 ) self.assertEqual(token_a[0] , _a ) self.assertEqual(token_a[0] , _a ) @require_tokenizers def __a ( self ) -> List[str]: lowerCAmelCase_ = self.get_tokenizers(do_lower_case=_a ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # a special token for Canine can be defined as follows: lowerCAmelCase_ = 0xe006 lowerCAmelCase_ = chr(_a ) lowerCAmelCase_ = AddedToken(_a , lstrip=_a ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(_a ) tokenizer.from_pretrained(_a ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_a ) with open(os.path.join(_a , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: lowerCAmelCase_ = json.load(_a ) with open(os.path.join(_a , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: lowerCAmelCase_ = json.load(_a ) # a special token for Canine can be defined as follows: lowerCAmelCase_ = 0xe006 lowerCAmelCase_ = chr(_a ) lowerCAmelCase_ = [new_token_a] lowerCAmelCase_ = [new_token_a] with open(os.path.join(_a , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(_a , _a ) with open(os.path.join(_a , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(_a , _a ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCAmelCase_ = tokenizer_class.from_pretrained(_a , extra_ids=0 ) self.assertIn(_a , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) lowerCAmelCase_ = 0xe007 lowerCAmelCase_ = chr(_a ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCAmelCase_ = [AddedToken(_a , lstrip=_a )] lowerCAmelCase_ = tokenizer_class.from_pretrained( _a , additional_special_tokens=_a , extra_ids=0 ) self.assertIn(_a , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def __a ( self ) -> List[str]: lowerCAmelCase_ = self.get_tokenizers(do_lower_case=_a ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCAmelCase_ = "hello world" if self.space_between_special_tokens: lowerCAmelCase_ = "[CLS] hello world [SEP]" else: lowerCAmelCase_ = input lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.decode(_a , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(_a , [output, output.lower()] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCAmelCase_ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowerCAmelCase_ = "a" lowerCAmelCase_ = ord(_a ) for attr in attributes_list: setattr(_a , attr + "_id" , _a ) self.assertEqual(getattr(_a , _a ) , _a ) self.assertEqual(getattr(_a , attr + "_id" ) , _a ) setattr(_a , attr + "_id" , _a ) self.assertEqual(getattr(_a , _a ) , _a ) self.assertEqual(getattr(_a , attr + "_id" ) , _a ) setattr(_a , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(_a , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(_a , "additional_special_tokens_ids" ) , [] ) lowerCAmelCase_ = 0xe006 lowerCAmelCase_ = chr(_a ) setattr(_a , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(_a , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(_a , "additional_special_tokens_ids" ) , [additional_special_token_id] ) def __a ( self ) -> Any: pass def __a ( self ) -> Optional[Any]: pass def __a ( self ) -> List[Any]: pass def __a ( self ) -> Tuple: pass def __a ( self ) -> int: pass def __a ( self ) -> Any: pass def __a ( self ) -> Any: pass def __a ( self ) -> List[str]: pass
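Context for the fixtures above: CANINE tokenizes at the Unicode codepoint level, so input ids are essentially ord() values plus private-use specials. A sketch of that mapping (illustration only, not the tokenizer implementation):

CLS, SEP, PAD = 0xE000, 0xE001, 0x0000  # 57344, 57345, 0, as in the fixture above


def canine_like_ids(text, max_len):
    ids = [CLS] + [ord(ch) for ch in text] + [SEP]
    return ids + [PAD] * (max_len - len(ids))


ids = canine_like_ids("Life is like a box of chocolates.", 39)
assert ids[:3] == [57344, 76, 105] and len(ids) == 39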
22
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
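A worked example with the functions above (log base 10, rounded to three places):

corpus = "the cat sat\nthe dog ran\nthe cat ran"
tf = term_frequency("cat", "the cat sat")  # 1
df, n = document_frequency("cat", corpus)  # (2, 3): "cat" appears in 2 of 3 docs
idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) = 0.176
print(tf_idf(tf, idf))                     # 0.176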
22
1
from __future__ import annotations lowerCamelCase__ = '''Muhammad Umer Farooq''' lowerCamelCase__ = '''MIT''' lowerCamelCase__ = '''1.0.0''' lowerCamelCase__ = '''Muhammad Umer Farooq''' lowerCamelCase__ = '''[email protected]''' lowerCamelCase__ = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class __magic_name__ (__lowercase ): def __init__( self , _a ) -> None: super().__init__() lowerCAmelCase_ = [] lowerCAmelCase_ = domain def __a ( self , _a , _a ) -> None: # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: lowerCAmelCase_ = parse.urljoin(self.domain , _a ) self.urls.append(_a ) def A(__a: str ): return ".".join(get_sub_domain_name(__a ).split("." )[-2:] ) def A(__a: str ): return parse.urlparse(__a ).netloc def A(__a: str = "https://github.com" ): lowerCAmelCase_ = get_domain_name(__a ) # Initialize the parser lowerCAmelCase_ = Parser(__a ) try: # Open URL lowerCAmelCase_ = requests.get(__a ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through lowerCAmelCase_ = set() for link in parser.urls: # open URL. # read = requests.get(link) try: lowerCAmelCase_ = requests.get(__a ) # Get the valid email. lowerCAmelCase_ = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(__a ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(__a ) if __name__ == "__main__": lowerCamelCase__ = emails_from_url('''https://github.com''') print(F'''{len(emails)} emails found:''') print('''\n'''.join(sorted(emails)))
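The extraction step in isolation: the same regex applied to static HTML, with no network access (the addresses are made up for illustration):

import re

html = '<a href="mailto:support@github.com">contact</a> noreply@github.com'
print(re.findall("[a-zA-Z0-9]+@" + "github.com", html))
# ['support@github.com', 'noreply@github.com']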
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
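The two basic scores these helpers combine, computed on toy labels (sketch using scikit-learn, which this module already depends on):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print((preds == labels).mean())               # accuracy: 0.75
print(f1_score(y_true=labels, y_pred=preds))  # ~0.8 (precision 2/3, recall 1)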
22
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = LDMTextToImagePipeline lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ = False def __a ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowerCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ = CLIPTextModel(_a ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCAmelCase_ = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def __a ( self , _a , _a=0 ) -> Union[str, Any]: if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = LDMTextToImagePipeline(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs(_a ) lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) lowerCAmelCase_ = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , _a , _a=torch.floataa , _a=0 ) -> int: lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) ) 
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a ) lowerCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __a ( self ) -> str: lowerCAmelCase_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_inputs(_a ) lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) lowerCAmelCase_ = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] ) lowerCAmelCase_ = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self , _a , _a=torch.floataa , _a=0 ) -> List[str]: lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) ) lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a ) lowerCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_inputs(_a ) lowerCAmelCase_ = pipe(**_a ).images[0] lowerCAmelCase_ = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) lowerCAmelCase_ = np.abs(expected_image - image ).max() assert max_diff < 1E-3
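The determinism pattern used throughout these tests: a fresh seeded generator per run makes diffusion outputs comparable (standard PyTorch API):

import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))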
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class __magic_name__ (__lowercase ): def __init__( self , _a ) -> Optional[Any]: lowerCAmelCase_ = data def __iter__( self ) -> int: for element in self.data: yield element def A(__a: Union[str, Any]=True ): lowerCAmelCase_ = Accelerator(even_batches=__a ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def A(__a: Accelerator , __a: int , __a: int , __a: bool = False ): if iterable: lowerCAmelCase_ = DummyIterableDataset(torch.as_tensor(range(__a ) ) ) else: lowerCAmelCase_ = TensorDataset(torch.as_tensor(range(__a ) ) ) lowerCAmelCase_ = DataLoader(__a , batch_size=__a ) lowerCAmelCase_ = accelerator.prepare(__a ) return dl def A(__a: Accelerator , __a: int , __a: int , __a: List[int] , __a: List[int] , ): lowerCAmelCase_ = create_dataloader(accelerator=__a , dataset_size=__a , batch_size=__a ) lowerCAmelCase_ = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def A(): lowerCAmelCase_ = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def A(): lowerCAmelCase_ = create_accelerator(even_batches=__a ) verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def A(): lowerCAmelCase_ = create_accelerator(even_batches=__a ) lowerCAmelCase_ = torch.nn.Linear(1 , 1 ) lowerCAmelCase_ = accelerator.prepare(__a ) lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) lowerCAmelCase_ = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__a ): lowerCAmelCase_ = ddp_model(batch[0].float() ) lowerCAmelCase_ = output.sum() loss.backward() batch_idxs.append(__a ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def A(__a: List[str] ): with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([Mock()] ): 
pass assert issubclass(w[-1].category , __a ) assert "only supported for multi-GPU" in str(w[-1].message ) def A(): lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = create_accelerator(even_batches=__a ) lowerCAmelCase_ = torch.nn.Linear(1 , 1 ) lowerCAmelCase_ = accelerator.prepare(__a ) lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): lowerCAmelCase_ = train_dl.batch_sampler.even_batches lowerCAmelCase_ = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def A(): lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = create_accelerator(even_batches=__a ) lowerCAmelCase_ = torch.nn.Linear(1 , 1 ) lowerCAmelCase_ = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): lowerCAmelCase_ = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def A(): lowerCAmelCase_ = create_accelerator() lowerCAmelCase_ = torch.nn.Linear(1 , 1 ) lowerCAmelCase_ = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): pass assert issubclass(w[-1].category , __a ) assert "only supported for map-style datasets" in str(w[-1].message ) def A(): lowerCAmelCase_ = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) lowerCAmelCase_ = accelerator.state.distributed_type lowerCAmelCase_ = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__a ) lowerCAmelCase_ = original_state if __name__ == "__main__": main()
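What even_batches changes, in plain Python: with 3 samples, batch size 1 and 2 processes, the shards either wrap around to stay even or leave rank 1 one batch short. This is a pure illustration of the scheduling the tests assert, not accelerate's implementation:

def shard(indices, rank, world_size, even_batches):
    shard_idx = indices[rank::world_size]
    if even_batches and len(indices) % world_size and rank >= len(indices) % world_size:
        shard_idx = shard_idx + indices[:1]  # wrap around to pad the short rank
    return shard_idx


print(shard([0, 1, 2], 0, 2, True), shard([0, 1, 2], 1, 2, True))    # [0, 2] [1, 0]
print(shard([0, 1, 2], 0, 2, False), shard([0, 1, 2], 1, 2, False))  # [0, 2] [1]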
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
from torch import nn


class ClassificationHead(nn.Module):
    """Maps a hidden state to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = nn.Linear(embed_size, class_size)
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
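A minimal smoke test for the head above; the 768-dim hidden size and batch of 2 are illustrative values, not anything the original file fixes:

import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(2, 768)  # two pooled hidden states
logits = head(hidden)
assert logits.shape == (2, 5)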
def find_min(arr):
    """Split arr into two subsets whose sums are as close as possible and
    return the minimum possible difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements can reach sum j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
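A quick usage sketch for find_min:

print(find_min([1, 6, 11, 5]))  # 1 -- best split is {1, 5, 6} (sum 12) vs {11}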
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        # 1-indexed tree stored in a flat list: leaves live at indices [N, 2N)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        # check every (i, j) range against a brute-force reduce
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the original dropped this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
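A usage sketch, assuming nltk's punkt data was downloaded successfully above; the sample text is made up:

text = "Pegasus emits <n> between sentences. This helper re-splits with nltk."
print(add_newline_to_end_of_each_sentence(text))
# prints the two sentences, one per line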
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase__ = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
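A worked example of the three helpers above; the rates and amounts are illustrative:

print(simple_interest(1000, 0.0005, 30))  # 1000 * 0.0005 * 30 = 15.0
print(compound_interest(1000, 0.05, 3))   # 1000 * (1.05**3 - 1) ~= 157.63
print(apr_interest(1000, 0.05, 1))        # 5% APR compounded daily for a year, ~= 51.27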
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
lowerCamelCase__ = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu lowerCamelCase__ = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: lowerCamelCase__ = json.load(f) @require_torch class __magic_name__ (unittest.TestCase ): def __a ( self , _a ) -> Tuple: return FSMTTokenizer.from_pretrained(_a ) def __a ( self , _a ) -> Any: lowerCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(_a ).to(_a ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 2_6.0], ["ru-en", 2_2.0], ["en-de", 2_2.0], ["de-en", 2_9.0], ] ) @slow def __a ( self , _a , _a ) -> Optional[int]: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase_ = f"facebook/wmt19-{pair}" lowerCAmelCase_ = self.get_tokenizer(_a ) lowerCAmelCase_ = self.get_model(_a ) lowerCAmelCase_ = bleu_data[pair]["src"] lowerCAmelCase_ = bleu_data[pair]["tgt"] lowerCAmelCase_ = tokenizer(_a , return_tensors="pt" , truncation=_a , padding="longest" ).to(_a ) lowerCAmelCase_ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase_ = tokenizer.batch_decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) lowerCAmelCase_ = calculate_bleu(_a , _a ) print(_a ) self.assertGreaterEqual(scores["bleu"] , _a )
22
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
# Imports import numpy as np class __magic_name__ : def __init__( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Tuple: self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a ) def __a ( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Union[str, Any]: if red is not None: lowerCAmelCase_ = red if green is not None: lowerCAmelCase_ = green if blue is not None: lowerCAmelCase_ = blue if red_edge is not None: lowerCAmelCase_ = red_edge if nir is not None: lowerCAmelCase_ = nir return True def __a ( self , _a="" , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Tuple: self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a ) lowerCAmelCase_ = { "ARVI2": self.arvaa, "CCCI": self.ccci, "CVI": self.cvi, "GLI": self.gli, "NDVI": self.ndvi, "BNDVI": self.bndvi, "redEdgeNDVI": self.red_edge_ndvi, "GNDVI": self.gndvi, "GBNDVI": self.gbndvi, "GRNDVI": self.grndvi, "RBNDVI": self.rbndvi, "PNDVI": self.pndvi, "ATSAVI": self.atsavi, "BWDRVI": self.bwdrvi, "CIgreen": self.ci_green, "CIrededge": self.ci_rededge, "CI": self.ci, "CTVI": self.ctvi, "GDVI": self.gdvi, "EVI": self.evi, "GEMI": self.gemi, "GOSAVI": self.gosavi, "GSAVI": self.gsavi, "Hue": self.hue, "IVI": self.ivi, "IPVI": self.ipvi, "I": self.i, "RVI": self.rvi, "MRVI": self.mrvi, "MSAVI": self.m_savi, "NormG": self.norm_g, "NormNIR": self.norm_nir, "NormR": self.norm_r, "NGRDI": self.ngrdi, "RI": self.ri, "S": self.s, "IF": self._if, "DVI": self.dvi, "TVI": self.tvi, "NDRE": self.ndre, } try: return funcs[index]() except KeyError: print("Index not in the list!" ) return False def __a ( self ) -> List[str]: return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def __a ( self ) -> int: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def __a ( self ) -> Tuple: return self.nir * (self.red / (self.green**2)) def __a ( self ) -> Any: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def __a ( self ) -> Any: return (self.nir - self.red) / (self.nir + self.red) def __a ( self ) -> Optional[Any]: return (self.nir - self.blue) / (self.nir + self.blue) def __a ( self ) -> List[Any]: return (self.redEdge - self.red) / (self.redEdge + self.red) def __a ( self ) -> int: return (self.nir - self.green) / (self.nir + self.green) def __a ( self ) -> Union[str, Any]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def __a ( self ) -> Dict: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def __a ( self ) -> str: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def __a ( self ) -> Any: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def __a ( self , _a=0.0_8 , _a=1.2_2 , _a=0.0_3 ) -> int: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def __a ( self ) -> Dict: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def __a ( self ) -> Tuple: return (self.nir / self.green) - 1 def __a ( self ) -> int: return (self.nir / self.redEdge) - 1 def __a ( self ) -> Tuple: return (self.red - self.blue) / self.red def __a ( self ) -> Dict: lowerCAmelCase_ = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def __a ( self ) -> Dict: return self.nir - self.green def __a ( self ) -> Any: return 2.5 * ( (self.nir - self.red) / 
(self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def __a ( self , _a=0.1_6 ) -> Dict: return (self.nir - self.green) / (self.nir + self.green + y) def __a ( self , _a=0.5 ) -> str: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def __a ( self ) -> Any: return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def __a ( self , _a=None , _a=None ) -> List[Any]: return (self.nir - b) / (a * self.red) def __a ( self ) -> Union[str, Any]: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def __a ( self ) -> List[Any]: return (self.red + self.green + self.blue) / 3_0.5 def __a ( self ) -> List[Any]: return self.nir / self.red def __a ( self ) -> Optional[Any]: return (self.rvi() - 1) / (self.rvi() + 1) def __a ( self ) -> Union[str, Any]: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def __a ( self ) -> Union[str, Any]: return self.green / (self.nir + self.red + self.green) def __a ( self ) -> List[str]: return self.nir / (self.nir + self.red + self.green) def __a ( self ) -> Optional[int]: return self.red / (self.nir + self.red + self.green) def __a ( self ) -> Any: return (self.green - self.red) / (self.green + self.red) def __a ( self ) -> Any: return (self.red - self.green) / (self.red + self.green) def __a ( self ) -> Tuple: lowerCAmelCase_ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCAmelCase_ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def __a ( self ) -> Any: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def __a ( self ) -> Dict: return self.nir / self.red def __a ( self ) -> List[str]: return (self.ndvi() + 0.5) ** (1 / 2) def __a ( self ) -> Any: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
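For intuition, a sketch of what the first helper above (referenced later as rename_key) does: any "name.<digits>" segment has its dot replaced by an underscore, which follows from the `\w+[.]\d+` regex. The sample key is made up:

print(rename_key("down_blocks.1.resnets.0.conv.weight"))
# -> down_blocks_1.resnets_0.conv.weight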
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether the prefix of
    # length i of input_string matches the prefix of length j of the
    # given pattern. "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1

    # a pattern of zero length never matches a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # a string of zero length matches a pattern where every other
    # character is a *
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now use a bottom-up approach for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # check whether the given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
import math


def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
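Quick checks for the binary-search variant above:

print(perfect_square_binary_search(625))  # True, 25 * 25
print(perfect_square_binary_search(626))  # False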
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                # not required, check version only if installed
                continue
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
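A hedged usage sketch: dep_version_check only works inside the package (the relative imports above), and the pinned range comes from the deps table, so the exact error depends on that table:

dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range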
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # slide the pattern over the text one position at a time
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." 
, "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # sample the next node according to the outgoing probabilities
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
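A short run of the chain above; the probabilities out of each node must sum to 1, and with these made-up transitions the counts settle near the 5:1 stationary ratio they imply:

transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
print(get_transitions("a", transitions, 1000))  # roughly 5x more visits to "a" than "b"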
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing",
            ",", "low", "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
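# A minimal sketch of the greedy longest-match-first lookup that the
# WordpieceTokenizer test above exercises. This standalone re-implementation is
# illustrative only (it omits transformers' max_input_chars_per_word limit) and
# is not the library's code.
def wordpiece_tokenize(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        current = None
        # Try the longest remaining substring first, shrinking from the right.
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # non-initial pieces carry the "##" prefix
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk_token]  # any unmatchable span invalidates the whole word
        tokens.append(current)
        start = end
    return tokens

# With the test vocab above, wordpiece_tokenize("unwanted", vocab) yields
# ["un", "##want", "##ed"] and "unwantedX" collapses to ["[UNK]"], matching
# the assertions in test_wordpiece_tokenizer.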
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with a slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
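# A minimal sketch (not part of the test suite) of the forward pass that
# XLMModelTester checks above, using a deliberately tiny config so it runs on
# CPU without downloading weights; the sizes mirror the tester's defaults.
if is_torch_available():

    def _demo_tiny_xlm_forward():
        config = XLMConfig(
            vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, n_langs=2, max_position_embeddings=512
        )
        model = XLMModel(config)
        model.eval()
        input_ids = torch.randint(0, 99, (13, 7))  # (batch_size, seq_length)
        with torch.no_grad():
            output = model(input_ids)
        # last_hidden_state has shape (batch_size, seq_length, emb_dim) == (13, 7, 32)
        return output.last_hidden_state.shape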
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
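# Small usage sketch for the generator above; the expected values in the
# comments were checked by hand for these small cases.
if __name__ == "__main__":
    from itertools import islice

    print(list(islice(prime_generator(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(solution(10))  # 2 + 3 + 5 + 7 = 17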
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
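# A hedged usage sketch for the processor above; the checkpoint name below is
# an assumption chosen for illustration, and the first call downloads the
# processor files from the Hub.
def _demo_blip_processor():
    import requests
    from PIL import Image
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    # Text plus image: the returned BatchEncoding carries both input_ids and pixel_values.
    inputs = processor(images=image, text="a photography of", return_tensors="pt")
    return sorted(inputs.keys())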
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
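# Hedged sketch of how the width parameters above interact: MobileNetV2
# implementations conventionally round scaled channel counts with a
# "make divisible" rule like the one below, governed by depth_multiplier,
# depth_divisible_by, and min_depth. This helper is illustrative and not part
# of the config class itself.
def _make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of `divisor`, never below `min_value`.
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure rounding does not shrink the channel count by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

# e.g. with depth_multiplier=1.4 a nominal 32-channel layer becomes
# _make_divisible(32 * 1.4, divisor=8) == 48 channels.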