Dataset schema (five fields per row):

| Column                  | Type   | Values                  |
|-------------------------|--------|-------------------------|
| code                    | string | 87 to 55.2k characters  |
| code_codestyle          | int64  | 0 to 349                |
| style_context           | string | 135 to 49.1k characters |
| style_context_codestyle | int64  | 0 to 349                |
| label                   | int64  | 0 or 1                  |
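Each row pairs a `code` snippet with a `style_context` snippet, each tagged with an integer code-style id, plus a binary `label`. As a minimal sketch of how rows with this schema might be loaded and inspected with the Hugging Face `datasets` library (the repository id below is a placeholder assumption, not the actual dataset path):

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]  # each row is a plain dict keyed by the column names above
print(row["code"][:120])               # first snippet (string, 87 to ~55.2k chars)
print(row["code_codestyle"])           # integer style id in [0, 349]
print(row["style_context"][:120])      # second snippet used as a style reference
print(row["style_context_codestyle"])  # integer style id in [0, 349]
print(row["label"])                    # binary label: 0 or 1
```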
Row 1
code:
```python
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

lowerCAmelCase: Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase: Union[str, Any] = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}

class _lowerCamelCase(lowercase__):
    '''simple docstring'''
    A_: Dict = """instructblip_vision_model"""

    def __init__(self: List[Any], _A: Optional[Any]=1408, _A: Any=6144, _A: int=39, _A: List[Any]=16, _A: str=224, _A: List[Any]=14, _A: int="gelu", _A: Tuple=1E-6, _A: Tuple=0.0, _A: Dict=1E-10, _A: Tuple=True, **_A: Tuple) -> Optional[Any]:
        super().__init__(**_A)
        __magic_name__: Union[str, Any] = hidden_size
        __magic_name__: List[str] = intermediate_size
        __magic_name__: str = num_hidden_layers
        __magic_name__: Optional[int] = num_attention_heads
        __magic_name__: Union[str, Any] = patch_size
        __magic_name__: Any = image_size
        __magic_name__: str = initializer_range
        __magic_name__: Union[str, Any] = attention_dropout
        __magic_name__: Any = layer_norm_eps
        __magic_name__: Dict = hidden_act
        __magic_name__: List[str] = qkv_bias

    @classmethod
    def __lowerCAmelCase(cls: Optional[int], _A: Union[str, os.PathLike], **_A: Dict) -> "PretrainedConfig":
        cls._set_token_in_kwargs(_A)
        __magic_name__, __magic_name__: Dict = cls.get_config_dict(_A, **_A)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            __magic_name__: str = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(_A, **_A)

class _lowerCamelCase(lowercase__):
    '''simple docstring'''
    A_: Union[str, Any] = """instructblip_qformer"""

    def __init__(self: Union[str, Any], _A: int=30522, _A: Optional[int]=768, _A: Optional[int]=12, _A: Optional[Any]=12, _A: Any=3072, _A: Any="gelu", _A: Dict=0.1, _A: Tuple=0.1, _A: Tuple=512, _A: List[str]=0.02, _A: Tuple=1E-12, _A: int=0, _A: Optional[Any]="absolute", _A: List[Any]=2, _A: int=1408, **_A: int) -> Any:
        super().__init__(pad_token_id=_A, **_A)
        __magic_name__: int = vocab_size
        __magic_name__: Union[str, Any] = hidden_size
        __magic_name__: Union[str, Any] = num_hidden_layers
        __magic_name__: Optional[int] = num_attention_heads
        __magic_name__: Union[str, Any] = hidden_act
        __magic_name__: str = intermediate_size
        __magic_name__: List[str] = hidden_dropout_prob
        __magic_name__: str = attention_probs_dropout_prob
        __magic_name__: List[Any] = max_position_embeddings
        __magic_name__: int = initializer_range
        __magic_name__: Any = layer_norm_eps
        __magic_name__: List[str] = position_embedding_type
        __magic_name__: str = cross_attention_frequency
        __magic_name__: List[Any] = encoder_hidden_size

    @classmethod
    def __lowerCAmelCase(cls: Union[str, Any], _A: Union[str, os.PathLike], **_A: List[str]) -> "PretrainedConfig":
        cls._set_token_in_kwargs(_A)
        __magic_name__, __magic_name__: Optional[Any] = cls.get_config_dict(_A, **_A)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            __magic_name__: Union[str, Any] = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(_A, **_A)

class _lowerCamelCase(lowercase__):
    '''simple docstring'''
    A_: List[str] = """instructblip"""
    A_: int = True

    def __init__(self: Optional[Any], _A: Tuple=None, _A: int=None, _A: Union[str, Any]=None, _A: Any=32, **_A: Optional[int]) -> Dict:
        super().__init__(**_A)
        if vision_config is None:
            __magic_name__: str = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            __magic_name__: List[str] = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            __magic_name__: Union[str, Any] = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        __magic_name__: Union[str, Any] = InstructBlipVisionConfig(**_A)
        __magic_name__: int = InstructBlipQFormerConfig(**_A)
        __magic_name__: Tuple = text_config['model_type'] if 'model_type' in text_config else 'opt'
        __magic_name__: Union[str, Any] = CONFIG_MAPPING[text_model_type](**_A)
        __magic_name__: Dict = self.text_config.tie_word_embeddings
        __magic_name__: List[Any] = self.text_config.is_encoder_decoder
        __magic_name__: Optional[int] = num_query_tokens
        __magic_name__: str = self.vision_config.hidden_size
        __magic_name__: List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        __magic_name__: str = 1.0
        __magic_name__: Union[str, Any] = 0.02

    @classmethod
    def __lowerCAmelCase(cls: int, _A: InstructBlipVisionConfig, _A: InstructBlipQFormerConfig, _A: PretrainedConfig, **_A: int) -> Any:
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **_A,
        )

    def __lowerCAmelCase(self: Dict) -> int:
        __magic_name__: str = copy.deepcopy(self.__dict__)
        __magic_name__: List[str] = self.vision_config.to_dict()
        __magic_name__: List[str] = self.qformer_config.to_dict()
        __magic_name__: Union[str, Any] = self.text_config.to_dict()
        __magic_name__: int = self.__class__.model_type
        return output
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf
    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )

class _lowerCamelCase:
    '''simple docstring'''

    def __init__(self: Optional[Any], _A: Optional[int], _A: Union[str, Any]=13, _A: Optional[int]=7, _A: int=True, _A: Union[str, Any]=True, _A: Tuple=True, _A: Dict=True, _A: int=99, _A: str=32, _A: List[Any]=2, _A: Any=4, _A: List[str]=37, _A: List[str]="gelu", _A: Any=0.1, _A: List[str]=0.1, _A: Optional[Any]=512, _A: str=16, _A: Union[str, Any]=2, _A: List[Any]=0.02, _A: Any=3, _A: str=4, _A: int=None) -> int:
        __magic_name__: str = parent
        __magic_name__: List[Any] = 13
        __magic_name__: Union[str, Any] = 7
        __magic_name__: Tuple = True
        __magic_name__: Dict = True
        __magic_name__: Union[str, Any] = True
        __magic_name__: Tuple = True
        __magic_name__: int = 99
        __magic_name__: List[str] = 384
        __magic_name__: Optional[int] = 2
        __magic_name__: List[Any] = 4
        __magic_name__: int = 37
        __magic_name__: Union[str, Any] = 'gelu'
        __magic_name__: Optional[int] = 0.1
        __magic_name__: str = 0.1
        __magic_name__: Optional[Any] = 512
        __magic_name__: Any = 16
        __magic_name__: Union[str, Any] = 2
        __magic_name__: Any = 0.02
        __magic_name__: List[str] = 3
        __magic_name__: Tuple = 4
        __magic_name__: List[Any] = 128
        __magic_name__: Optional[Any] = 2
        __magic_name__: List[str] = 9
        __magic_name__: str = 1
        __magic_name__: List[str] = None

    def __lowerCAmelCase(self: List[str]) -> List[str]:
        __magic_name__: Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __magic_name__: Optional[Any] = None
        if self.use_input_mask:
            __magic_name__: str = random_attention_mask([self.batch_size, self.seq_length])
        __magic_name__: List[str] = None
        if self.use_token_type_ids:
            __magic_name__: int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        __magic_name__: Tuple = None
        __magic_name__: Union[str, Any] = None
        __magic_name__: int = None
        if self.use_labels:
            __magic_name__: int = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __magic_name__: Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __magic_name__: int = ids_tensor([self.batch_size], self.num_choices)
        __magic_name__: Optional[Any] = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=_A,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCAmelCase(self: int, _A: int, _A: str, _A: Union[str, Any], _A: List[str], _A: Tuple, _A: int, _A: Union[str, Any]) -> Any:
        __magic_name__: Dict = TFConvBertModel(config=_A)
        __magic_name__: int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __magic_name__: Any = [input_ids, input_mask]
        __magic_name__: Tuple = model(_A)
        __magic_name__: List[Any] = model(_A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def __lowerCAmelCase(self: int, _A: str, _A: Dict, _A: Dict, _A: Dict, _A: Any, _A: Optional[int], _A: int) -> Optional[Any]:
        __magic_name__: Dict = TFConvBertForMaskedLM(config=_A)
        __magic_name__: Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __magic_name__: Dict = model(_A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def __lowerCAmelCase(self: Optional[int], _A: str, _A: Union[str, Any], _A: Tuple, _A: Dict, _A: Dict, _A: Union[str, Any], _A: Dict) -> Tuple:
        __magic_name__: Any = self.num_labels
        __magic_name__: str = TFConvBertForSequenceClassification(config=_A)
        __magic_name__: List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __magic_name__: Any = model(_A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def __lowerCAmelCase(self: int, _A: Dict, _A: Tuple, _A: str, _A: str, _A: int, _A: List[Any], _A: Optional[int]) -> Union[str, Any]:
        __magic_name__: Optional[Any] = self.num_choices
        __magic_name__: Optional[int] = TFConvBertForMultipleChoice(config=_A)
        __magic_name__: Union[str, Any] = tf.tile(tf.expand_dims(_A, 1), (1, self.num_choices, 1))
        __magic_name__: str = tf.tile(tf.expand_dims(_A, 1), (1, self.num_choices, 1))
        __magic_name__: Tuple = tf.tile(tf.expand_dims(_A, 1), (1, self.num_choices, 1))
        __magic_name__: Optional[int] = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        __magic_name__: Union[str, Any] = model(_A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def __lowerCAmelCase(self: List[Any], _A: int, _A: List[str], _A: int, _A: Tuple, _A: List[str], _A: Any, _A: Optional[int]) -> List[Any]:
        __magic_name__: List[Any] = self.num_labels
        __magic_name__: Union[str, Any] = TFConvBertForTokenClassification(config=_A)
        __magic_name__: Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __magic_name__: Any = model(_A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def __lowerCAmelCase(self: Optional[int], _A: List[Any], _A: Tuple, _A: List[Any], _A: Optional[int], _A: Tuple, _A: str, _A: List[str]) -> int:
        __magic_name__: Dict = TFConvBertForQuestionAnswering(config=_A)
        __magic_name__: int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __magic_name__: Union[str, Any] = model(_A)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def __lowerCAmelCase(self: Optional[Any]) -> Optional[int]:
        __magic_name__: List[str] = self.prepare_config_and_inputs()
        (
            (__magic_name__),
            (__magic_name__),
            (__magic_name__),
            (__magic_name__),
            (__magic_name__),
            (__magic_name__),
            (__magic_name__),
        ): str = config_and_inputs
        __magic_name__: Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict

@require_tf
class _lowerCamelCase(lowercase__, lowercase__, unittest.TestCase):
    '''simple docstring'''

    A_: Optional[int] = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    A_: List[str] = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A_: Tuple = False
    A_: Any = False
    A_: List[Any] = False

    def __lowerCAmelCase(self: List[Any]) -> int:
        __magic_name__: Optional[Any] = TFConvBertModelTester(self)
        __magic_name__: List[Any] = ConfigTester(self, config_class=_A, hidden_size=37)

    def __lowerCAmelCase(self: str) -> Dict:
        self.config_tester.run_common_tests()

    def __lowerCAmelCase(self: Optional[Any]) -> Union[str, Any]:
        __magic_name__: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A)

    def __lowerCAmelCase(self: Optional[int]) -> int:
        __magic_name__: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_A)

    def __lowerCAmelCase(self: List[Any]) -> Dict:
        __magic_name__: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_A)

    def __lowerCAmelCase(self: List[str]) -> Optional[int]:
        __magic_name__: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_A)

    def __lowerCAmelCase(self: Union[str, Any]) -> Union[str, Any]:
        __magic_name__: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_A)

    def __lowerCAmelCase(self: int) -> Any:
        __magic_name__: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_A)

    @slow
    def __lowerCAmelCase(self: Dict) -> List[str]:
        __magic_name__, __magic_name__: int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__: Optional[int] = True
        __magic_name__: Any = True
        if hasattr(_A, 'use_cache'):
            __magic_name__: List[Any] = True
        __magic_name__: str = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        __magic_name__: Optional[Any] = getattr(self.model_tester, 'key_length', _A)
        for model_class in self.all_model_classes:
            __magic_name__: List[str] = self._prepare_for_class(_A, _A)
            __magic_name__: Optional[int] = model_class(_A)
            __magic_name__: Tuple = len(model(_A))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_A, saved_model=_A)
                __magic_name__: Union[str, Any] = os.path.join(_A, 'saved_model', '1')
                __magic_name__: Optional[int] = tf.keras.models.load_model(_A)
                __magic_name__: Optional[Any] = model(_A)
                if self.is_encoder_decoder:
                    __magic_name__: Optional[int] = outputs['encoder_hidden_states']
                    __magic_name__: Tuple = outputs['encoder_attentions']
                else:
                    __magic_name__: Union[str, Any] = outputs['hidden_states']
                    __magic_name__: Optional[Any] = outputs['attentions']
                self.assertEqual(len(_A), _A)
                __magic_name__: Optional[Any] = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(_A), _A)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(_A), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def __lowerCAmelCase(self: Union[str, Any]) -> Any:
        __magic_name__: Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(_A)

    def __lowerCAmelCase(self: List[str]) -> Any:
        __magic_name__, __magic_name__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__: str = True
        __magic_name__: Optional[int] = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        __magic_name__: List[Any] = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        __magic_name__: List[Any] = getattr(self.model_tester, 'key_length', _A)
        __magic_name__: Optional[int] = getattr(self.model_tester, 'key_length', _A)

        def check_decoder_attentions_output(_A: List[Any]):
            __magic_name__: Tuple = len(_A)
            self.assertEqual(out_len % 2, 0)
            __magic_name__: Any = outputs.decoder_attentions
            self.assertEqual(len(_A), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(_A: int):
            __magic_name__: Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_A), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            __magic_name__: Union[str, Any] = True
            __magic_name__: Tuple = False
            __magic_name__: List[str] = model_class(_A)
            __magic_name__: Any = model(self._prepare_for_class(_A, _A))
            __magic_name__: Tuple = len(_A)
            self.assertEqual(config.output_hidden_states, _A)
            check_encoder_attentions_output(_A)

            if self.is_encoder_decoder:
                __magic_name__: Any = model_class(_A)
                __magic_name__: Any = model(self._prepare_for_class(_A, _A))
                self.assertEqual(config.output_hidden_states, _A)
                check_decoder_attentions_output(_A)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __magic_name__: Optional[int] = True
            __magic_name__: Optional[int] = model_class(_A)
            __magic_name__: Optional[int] = model(self._prepare_for_class(_A, _A))
            self.assertEqual(config.output_hidden_states, _A)
            check_encoder_attentions_output(_A)

            # Check attention is always last and order is fine
            __magic_name__: str = True
            __magic_name__: str = True
            __magic_name__: Optional[int] = model_class(_A)
            __magic_name__: str = model(self._prepare_for_class(_A, _A))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(_A))
            self.assertEqual(model.config.output_hidden_states, _A)
            check_encoder_attentions_output(_A)

@require_tf
class _lowerCamelCase(unittest.TestCase):
    '''simple docstring'''

    @slow
    def __lowerCAmelCase(self: int) -> int:
        __magic_name__: List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        __magic_name__: Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]])
        __magic_name__: Tuple = model(_A)[0]
        __magic_name__: str = [1, 6, 768]
        self.assertEqual(output.shape, _A)
        __magic_name__: Tuple = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], _A, atol=1E-4)
```
style_context_codestyle: 331
label: 1
Row 2
code:
```python
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer

lowerCAmelCase: Any = logging.get_logger(__name__)

lowerCAmelCase: Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase: Dict = {
    '''vocab_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
        '''distilbert-base-uncased-distilled-squad''': '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''',
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
        '''distilbert-base-cased-distilled-squad''': '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''',
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
        '''distilbert-base-multilingual-cased''': '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
        '''distilbert-base-uncased-distilled-squad''': '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''',
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-cased-distilled-squad''': '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''',
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-multilingual-cased''': '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''',
    },
}

lowerCAmelCase: Union[str, Any] = {
    '''distilbert-base-uncased''': 5_1_2,
    '''distilbert-base-uncased-distilled-squad''': 5_1_2,
    '''distilbert-base-cased''': 5_1_2,
    '''distilbert-base-cased-distilled-squad''': 5_1_2,
    '''distilbert-base-german-cased''': 5_1_2,
    '''distilbert-base-multilingual-cased''': 5_1_2,
}

lowerCAmelCase: Tuple = {
    '''distilbert-base-uncased''': {'''do_lower_case''': True},
    '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
    '''distilbert-base-cased''': {'''do_lower_case''': False},
    '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
    '''distilbert-base-german-cased''': {'''do_lower_case''': False},
    '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}

class _lowerCamelCase(lowercase__):
    '''simple docstring'''
    A_: Tuple = VOCAB_FILES_NAMES
    A_: Dict = PRETRAINED_VOCAB_FILES_MAP
    A_: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_: int = PRETRAINED_INIT_CONFIGURATION
    A_: Optional[Any] = ["""input_ids""", """attention_mask"""]
    A_: Union[str, Any] = DistilBertTokenizer

    def __init__(self: Union[str, Any], _A: Optional[Any]=None, _A: Optional[int]=None, _A: Tuple=True, _A: Dict="[UNK]", _A: List[str]="[SEP]", _A: Optional[int]="[PAD]", _A: Union[str, Any]="[CLS]", _A: Dict="[MASK]", _A: int=True, _A: Optional[Any]=None, **_A: List[str]) -> Dict:
        super().__init__(
            _A,
            tokenizer_file=_A,
            do_lower_case=_A,
            unk_token=_A,
            sep_token=_A,
            pad_token=_A,
            cls_token=_A,
            mask_token=_A,
            tokenize_chinese_chars=_A,
            strip_accents=_A,
            **_A,
        )
        __magic_name__: Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', _A) != do_lower_case
            or normalizer_state.get('strip_accents', _A) != strip_accents
            or normalizer_state.get('handle_chinese_chars', _A) != tokenize_chinese_chars
        ):
            __magic_name__: str = getattr(_A, normalizer_state.pop('type'))
            __magic_name__: Any = do_lower_case
            __magic_name__: Optional[Any] = strip_accents
            __magic_name__: Optional[int] = tokenize_chinese_chars
            __magic_name__: Dict = normalizer_class(**_A)
        __magic_name__: List[str] = do_lower_case

    def __lowerCAmelCase(self: Dict, _A: Any, _A: Optional[Any]=None) -> Optional[Any]:
        __magic_name__: str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __lowerCAmelCase(self: Dict, _A: List[int], _A: Optional[List[int]] = None) -> List[int]:
        __magic_name__: Dict = [self.sep_token_id]
        __magic_name__: str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def __lowerCAmelCase(self: Union[str, Any], _A: str, _A: Optional[str] = None) -> Tuple[str]:
        __magic_name__: Optional[int] = self._tokenizer.model.save(_A, name=_A)
        return tuple(_A)
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss

lowerCAmelCase: Dict = pytest.mark.integration

@require_faiss
class _lowerCamelCase(lowercase__):
    '''simple docstring'''

    def __lowerCAmelCase(self: Optional[Any]) -> Union[str, Any]:
        __magic_name__: str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A) for x in np.arange(30).tolist()]})
        return dset

    def __lowerCAmelCase(self: List[str]) -> Tuple:
        import faiss

        __magic_name__: Dataset = self._create_dummy_dataset()
        __magic_name__: Union[str, Any] = dset.map(
            lambda _A, _A: {"vecs": i * np.ones(5, dtype=np.floataa)}, with_indices=_A, keep_in_memory=_A
        )
        __magic_name__: int = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        __magic_name__, __magic_name__: List[str] = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.floataa))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def __lowerCAmelCase(self: Any) -> str:
        import faiss

        __magic_name__: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        __magic_name__, __magic_name__: Any = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.floataa))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def __lowerCAmelCase(self: Tuple) -> int:
        import faiss

        __magic_name__: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=_A) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)
        __magic_name__, __magic_name__: Dict = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.floataa))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def __lowerCAmelCase(self: Union[str, Any]) -> Optional[int]:
        __magic_name__: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        self.assertRaises(_A, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.floataa)))

    def __lowerCAmelCase(self: List[Any]) -> Tuple:
        from elasticsearch import Elasticsearch

        __magic_name__: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            __magic_name__: int = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            __magic_name__: List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            __magic_name__: Union[str, Any] = Elasticsearch()
            dset.add_elasticsearch_index('filename', es_client=_A)
            __magic_name__, __magic_name__: Tuple = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')

@require_faiss
class _lowerCamelCase(lowercase__):
    '''simple docstring'''

    def __lowerCAmelCase(self: Tuple) -> List[Any]:
        import faiss

        __magic_name__: int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.floataa))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        __magic_name__: str = np.zeros(5, dtype=np.floataa)
        __magic_name__: Optional[int] = 1
        __magic_name__, __magic_name__: str = index.search(_A)
        self.assertRaises(_A, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        __magic_name__: Optional[Any] = np.eye(5, dtype=np.floataa)[::-1]
        __magic_name__, __magic_name__: str = index.search_batch(_A)
        self.assertRaises(_A, index.search_batch, queries[0])
        __magic_name__: List[Any] = [scores[0] for scores in total_scores]
        __magic_name__: List[str] = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(_A), 0)
        self.assertListEqual([4, 3, 2, 1, 0], _A)

    def __lowerCAmelCase(self: Dict) -> Optional[Any]:
        import faiss

        __magic_name__: str = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        __magic_name__: str = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(_A):
            __magic_name__: Dict = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def __lowerCAmelCase(self: Union[str, Any]) -> Dict:
        import faiss

        __magic_name__: Any = faiss.IndexFlat(5)
        __magic_name__: Optional[Any] = FaissIndex(custom_index=_A)
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def __lowerCAmelCase(self: Dict) -> Tuple:
        import faiss

        __magic_name__: Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.floataa))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=_A) as tmp_file:
            index.save(tmp_file.name)
            __magic_name__: Optional[int] = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        __magic_name__: Dict = np.zeros(5, dtype=np.floataa)
        __magic_name__: Tuple = 1
        __magic_name__, __magic_name__: Optional[Any] = index.search(_A)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

@require_faiss
def lowerCamelCase(lowerCAmelCase: Tuple):
    """simple docstring"""
    import faiss

    __magic_name__: Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.floataa))
    __magic_name__: Dict = 'index.faiss'
    __magic_name__: Optional[Any] = f'mock://{index_name}'
    index.save(lowerCAmelCase, storage_options=mockfs.storage_options)
    __magic_name__: Tuple = FaissIndex.load(lowerCAmelCase, storage_options=mockfs.storage_options)
    __magic_name__: Union[str, Any] = np.zeros(5, dtype=np.floataa)
    __magic_name__: List[str] = 1
    __magic_name__, __magic_name__: Dict = index.search(lowerCAmelCase)
    assert scores[0] > 0
    assert indices[0] == 1

@require_elasticsearch
class _lowerCamelCase(lowercase__):
    '''simple docstring'''

    def __lowerCAmelCase(self: Tuple) -> Dict:
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            __magic_name__: Any = Elasticsearch()
            __magic_name__: Union[str, Any] = {'acknowledged': True}
            __magic_name__: Tuple = ElasticSearchIndex(es_client=_A)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            __magic_name__: str = 'foo'
            __magic_name__: str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            __magic_name__, __magic_name__: Dict = index.search(_A)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            __magic_name__: str = 'foo'
            __magic_name__: Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            __magic_name__, __magic_name__: Dict = index.search(_A, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            __magic_name__: Optional[Any] = ['foo', 'bar', 'foobar']
            __magic_name__: Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            __magic_name__, __magic_name__: Optional[Any] = index.search_batch(_A)
            __magic_name__: Tuple = [scores[0] for scores in total_scores]
            __magic_name__: List[str] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(_A), 0)
            self.assertListEqual([1, 1, 1], _A)
            # batched queries with timeout
            __magic_name__: Union[str, Any] = ['foo', 'bar', 'foobar']
            __magic_name__: Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            __magic_name__, __magic_name__: Dict = index.search_batch(_A, request_timeout=30)
            __magic_name__: Optional[int] = [scores[0] for scores in total_scores]
            __magic_name__: Union[str, Any] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(_A), 0)
            self.assertListEqual([1, 1, 1], _A)
```
style_context_codestyle: 331
label: 1
Row 3
code:
```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

lowerCAmelCase: Union[str, Any] = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: str = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Optional[int] = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    lowerCAmelCase: int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json

def lowerCamelCase(lowerCAmelCase: Tuple):
    """simple docstring"""
    __magic_name__: List[Any] = filter(lambda lowerCAmelCase: p.requires_grad, model.parameters())
    __magic_name__: Tuple = sum([np.prod(p.size()) for p in model_parameters])
    return params

lowerCAmelCase: Union[str, Any] = logging.getLogger(__name__)

def lowerCamelCase(lowerCAmelCase: List[Any], lowerCAmelCase: int):
    """simple docstring"""
    if metric == "rouge2":
        __magic_name__: Any = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        __magic_name__: Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        __magic_name__: Dict = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        __magic_name__: int = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.'
        )
    __magic_name__: List[Any] = ModelCheckpoint(
        dirpath=lowerCAmelCase,
        filename=lowerCAmelCase,
        monitor=f'val_{metric}',
        mode='max',
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback

def lowerCamelCase(lowerCAmelCase: Optional[int], lowerCAmelCase: Optional[Any]):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}',
        mode='min' if 'loss' in metric else 'max',
        patience=lowerCAmelCase,
        verbose=lowerCAmelCase,
    )

class _lowerCamelCase(pl.Callback):
    '''simple docstring'''

    def __lowerCAmelCase(self: List[str], _A: Optional[Any], _A: List[str]) -> int:
        __magic_name__: Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(_A)

    @rank_zero_only
    def __lowerCAmelCase(self: Any, _A: pl.Trainer, _A: pl.LightningModule, _A: str, _A: Dict=True) -> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****')
        __magic_name__: List[str] = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        __magic_name__: Optional[Any] = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            __magic_name__: List[Any] = od / 'test_results.txt'
            __magic_name__: Dict = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            __magic_name__: Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            __magic_name__: Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=_A)
        generations_file.parent.mkdir(exist_ok=_A)
        with open(_A, 'a+') as writer:
            for key in sorted(_A):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                __magic_name__: Optional[Any] = metrics[key]
                if isinstance(_A, torch.Tensor):
                    __magic_name__: Tuple = val.item()
                __magic_name__: int = F'{key}: {val:.6f}\n'
                writer.write(_A)
        if not save_generations:
            return
        if "preds" in metrics:
            __magic_name__: str = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(_A)

    @rank_zero_only
    def __lowerCAmelCase(self: List[str], _A: Union[str, Any], _A: Tuple) -> Tuple:
        try:
            __magic_name__: str = pl_module.model.model.num_parameters()
        except AttributeError:
            __magic_name__: List[str] = pl_module.model.num_parameters()
        __magic_name__: List[Any] = count_trainable_parameters(_A)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})

    @rank_zero_only
    def __lowerCAmelCase(self: Union[str, Any], _A: pl.Trainer, _A: pl.LightningModule) -> List[Any]:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(_A, _A, 'test')

    @rank_zero_only
    def __lowerCAmelCase(self: Tuple, _A: pl.Trainer, _A: Any) -> List[Any]:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
```
style_context_codestyle: 331
label: 1
Row 4
code:
```python
'''simple docstring'''
import os

def lowerCamelCase():
    """simple docstring"""
    __magic_name__: Any = os.path.dirname(os.path.realpath(lowerCAmelCase))
    __magic_name__: str = os.path.join(lowerCAmelCase, 'triangle.txt')
    with open(lowerCAmelCase) as f:
        __magic_name__: int = f.readlines()
    __magic_name__: Union[str, Any] = []
    for line in triangle:
        __magic_name__: List[str] = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(lowerCAmelCase))
        a.append(lowerCAmelCase)
    for i in range(1, len(lowerCAmelCase)):
        for j in range(len(a[i])):
            __magic_name__: Tuple = a[i - 1][j] if j != len(a[i - 1]) else 0
            __magic_name__: Union[str, Any] = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(lowerCAmelCase, lowerCAmelCase)
    return max(a[-1])

if __name__ == "__main__":
    print(solution())
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''

def lowerCamelCase():
    """simple docstring"""
    return 1

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(lowerCAmelCase)

def lowerCamelCase(lowerCAmelCase: int = 200):
    """simple docstring"""
    return two_pound(lowerCAmelCase)

if __name__ == "__main__":
    print(solution(int(input().strip())))
```
style_context_codestyle: 331
label: 1
Row 5
code:
```python
'''simple docstring'''
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCAmelCase: Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4  # unit of ℏ : J * s
lowerCAmelCase: Union[str, Any] = 3E8  # unit of c : m * s^-1

def lowerCamelCase(lowerCAmelCase: float, lowerCAmelCase: float, lowerCAmelCase: float):
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        __magic_name__: Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        __magic_name__: Optional[int] = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        __magic_name__: Union[str, Any] = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')

# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''
from ..utils import DummyObject, requires_backends

class _lowerCamelCase(metaclass=lowercase__):
    '''simple docstring'''
    A_: Optional[Any] = ["""flax""", """transformers"""]

    def __init__(self: Union[str, Any], *_A: Dict, **_A: Any) -> int:
        requires_backends(self, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Optional[Any], *_A: List[Any], **_A: Any) -> List[str]:
        requires_backends(cls, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: List[str], *_A: Tuple, **_A: Optional[int]) -> int:
        requires_backends(cls, ['flax', 'transformers'])

class _lowerCamelCase(metaclass=lowercase__):
    '''simple docstring'''
    A_: Union[str, Any] = ["""flax""", """transformers"""]

    def __init__(self: Union[str, Any], *_A: Any, **_A: int) -> List[Any]:
        requires_backends(self, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Union[str, Any], *_A: Optional[int], **_A: Dict) -> Optional[Any]:
        requires_backends(cls, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Tuple, *_A: Any, **_A: Union[str, Any]) -> Dict:
        requires_backends(cls, ['flax', 'transformers'])

class _lowerCamelCase(metaclass=lowercase__):
    '''simple docstring'''
    A_: Dict = ["""flax""", """transformers"""]

    def __init__(self: int, *_A: Optional[int], **_A: Any) -> List[Any]:
        requires_backends(self, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Any, *_A: int, **_A: str) -> Any:
        requires_backends(cls, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Optional[Any], *_A: Union[str, Any], **_A: List[str]) -> Optional[int]:
        requires_backends(cls, ['flax', 'transformers'])

class _lowerCamelCase(metaclass=lowercase__):
    '''simple docstring'''
    A_: Optional[int] = ["""flax""", """transformers"""]

    def __init__(self: Tuple, *_A: Dict, **_A: str) -> Optional[Any]:
        requires_backends(self, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: str, *_A: Dict, **_A: Optional[Any]) -> Dict:
        requires_backends(cls, ['flax', 'transformers'])

    @classmethod
    def __lowerCAmelCase(cls: Any, *_A: List[str], **_A: str) -> Optional[int]:
        requires_backends(cls, ['flax', 'transformers'])
```
style_context_codestyle: 331
label: 1
Row 6
code:
```python
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

lowerCAmelCase: Optional[int] = logging.get_logger(__name__)

class _lowerCamelCase(lowercase__):
    '''simple docstring'''
    A_: Tuple = ["""pixel_values"""]

    def __init__(self: Dict, _A: bool = True, _A: Dict[str, int] = None, _A: float = None, _A: PILImageResampling = PILImageResampling.BILINEAR, _A: bool = True, _A: Union[int, float] = 1 / 255, _A: bool = True, _A: Optional[Union[float, List[float]]] = None, _A: Optional[Union[float, List[float]]] = None, **_A: int) -> None:
        super().__init__(**_A)
        __magic_name__: List[str] = size if size is not None else {'shortest_edge': 384}
        __magic_name__: Dict = get_size_dict(_A, default_to_square=_A)
        __magic_name__: List[Any] = do_resize
        __magic_name__: str = size
        # Default value set here for backwards compatibility where the value in config is None
        __magic_name__: Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
        __magic_name__: int = resample
        __magic_name__: List[str] = do_rescale
        __magic_name__: List[Any] = rescale_factor
        __magic_name__: str = do_normalize
        __magic_name__: List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __magic_name__: int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __lowerCAmelCase(self: Optional[Any], _A: np.ndarray, _A: Dict[str, int], _A: float, _A: PILImageResampling = PILImageResampling.BICUBIC, _A: Optional[Union[str, ChannelDimension]] = None, **_A: Tuple) -> np.ndarray:
        __magic_name__: Optional[int] = get_size_dict(_A, default_to_square=_A)
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}')
        __magic_name__: Dict = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            __magic_name__: Dict = int(shortest_edge / crop_pct)
            __magic_name__: str = get_resize_output_image_size(_A, size=_A, default_to_square=_A)
            __magic_name__: Optional[int] = resize(image=_A, size=_A, resample=_A, data_format=_A, **_A)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=_A, size=(shortest_edge, shortest_edge), data_format=_A, **_A)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(_A, size=(shortest_edge, shortest_edge), resample=_A, data_format=_A, **_A)

    def __lowerCAmelCase(self: int, _A: np.ndarray, _A: Union[int, float], _A: Optional[Union[str, ChannelDimension]] = None, **_A: int) -> int:
        return rescale(_A, scale=_A, data_format=_A, **_A)

    def __lowerCAmelCase(self: List[Any], _A: np.ndarray, _A: Union[float, List[float]], _A: Union[float, List[float]], _A: Optional[Union[str, ChannelDimension]] = None, **_A: int) -> np.ndarray:
        return normalize(_A, mean=_A, std=_A, data_format=_A, **_A)

    def __lowerCAmelCase(self: Optional[Any], _A: ImageInput, _A: bool = None, _A: Dict[str, int] = None, _A: float = None, _A: PILImageResampling = None, _A: bool = None, _A: float = None, _A: bool = None, _A: Optional[Union[float, List[float]]] = None, _A: Optional[Union[float, List[float]]] = None, _A: Optional[Union[str, TensorType]] = None, _A: ChannelDimension = ChannelDimension.FIRST, **_A: str) -> PIL.Image.Image:
        __magic_name__: int = do_resize if do_resize is not None else self.do_resize
        __magic_name__: Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
        __magic_name__: Optional[Any] = resample if resample is not None else self.resample
        __magic_name__: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __magic_name__: List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __magic_name__: str = do_normalize if do_normalize is not None else self.do_normalize
        __magic_name__: str = image_mean if image_mean is not None else self.image_mean
        __magic_name__: Dict = image_std if image_std is not None else self.image_std
        __magic_name__: Dict = size if size is not None else self.size
        __magic_name__: List[Any] = get_size_dict(_A, default_to_square=_A)
        __magic_name__: int = make_list_of_images(_A)
        if not valid_images(_A):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        __magic_name__: Optional[Any] = [to_numpy_array(_A) for image in images]
        if do_resize:
            __magic_name__: List[str] = [self.resize(image=_A, size=_A, crop_pct=_A, resample=_A) for image in images]
        if do_rescale:
            __magic_name__: Tuple = [self.rescale(image=_A, scale=_A) for image in images]
        if do_normalize:
            __magic_name__: int = [self.normalize(image=_A, mean=_A, std=_A) for image in images]
        __magic_name__: Tuple = [to_channel_dimension_format(_A, _A) for image in images]
        __magic_name__: Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=_A, tensor_type=_A)
```
code_codestyle: 331
style_context:
```python
'''simple docstring'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

lowerCAmelCase: Tuple = logging.get_logger(__name__)

@add_end_docstrings(lowercase__)
class _lowerCamelCase(lowercase__):
    '''simple docstring'''

    def __init__(self: Optional[Any], *_A: Optional[Any], **_A: List[Any]) -> Any:
        super().__init__(*_A, **_A)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def __lowerCAmelCase(self: str, _A: Any=None, _A: Union[str, Any]=None, _A: Union[str, Any]=None) -> List[str]:
        __magic_name__: Union[str, Any] = {}
        __magic_name__: Optional[Any] = {}
        if prompt is not None:
            __magic_name__: Union[str, Any] = prompt
        if generate_kwargs is not None:
            __magic_name__: str = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                __magic_name__: Union[str, Any] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one'
                )
            __magic_name__: Optional[Any] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self: Optional[Any], _A: Union[str, List[str], "Image.Image", List["Image.Image"]], **_A: List[Any]) -> int:
        return super().__call__(_A, **_A)

    def __lowerCAmelCase(self: List[str], _A: str, _A: Optional[int]=None) -> Dict:
        __magic_name__: List[Any] = load_image(_A)
        if prompt is not None:
            if not isinstance(_A, _A):
                raise ValueError(
                    F'Received an invalid text input, got - {type(_A)} - but expected a single string. '
                    'Note also that one single text can be provided for conditional image to text generation.'
                )
            __magic_name__: Any = self.model.config.model_type
            if model_type == "git":
                __magic_name__: int = self.image_processor(images=_A, return_tensors=self.framework)
                __magic_name__: List[str] = self.tokenizer(text=_A, add_special_tokens=_A).input_ids
                __magic_name__: str = [self.tokenizer.cls_token_id] + input_ids
                __magic_name__: List[Any] = torch.tensor(_A).unsqueeze(0)
                model_inputs.update({'input_ids': input_ids})
            elif model_type == "pix2struct":
                __magic_name__: Dict = self.image_processor(images=_A, header_text=_A, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                __magic_name__: int = self.image_processor(images=_A, return_tensors=self.framework)
                __magic_name__: List[str] = self.tokenizer(_A, return_tensors=self.framework)
                model_inputs.update(_A)
            else:
                raise ValueError(F'Model type {model_type} does not support conditional text generation')
        else:
            __magic_name__: Optional[Any] = self.image_processor(images=_A, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            __magic_name__: int = None
        return model_inputs

    def __lowerCAmelCase(self: List[Any], _A: Tuple, _A: List[str]=None) -> Any:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'], _A)
            and all(x is None for x in model_inputs['input_ids'])
        ):
            __magic_name__: str = None
        if generate_kwargs is None:
            __magic_name__: Optional[int] = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        __magic_name__: Optional[Any] = model_inputs.pop(self.model.main_input_name)
        __magic_name__: Union[str, Any] = self.model.generate(_A, **_A, **_A)
        return model_outputs

    def __lowerCAmelCase(self: List[str], _A: Tuple) -> Optional[Any]:
        __magic_name__: Optional[Any] = []
        for output_ids in model_outputs:
            __magic_name__: Union[str, Any] = {
                'generated_text': self.tokenizer.decode(
                    _A,
                    skip_special_tokens=_A,
                )
            }
            records.append(_A)
        return records
```
331
1
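The image-to-text pipeline above branches its preprocessing on `self.model.config.model_type`: GIT prepends the CLS token to the tokenized prompt, Pix2Struct renders the prompt as `header_text`, and vision-encoder-decoder checkpoints reject prompts outright. A minimal usage sketch against the public `transformers` pipeline API; the checkpoint name, image path, and token budget below are illustrative assumptions, not taken from this file:

from transformers import pipeline

# Hypothetical checkpoint and input image, for illustration only.
captioner = pipeline('image-to-text', model='microsoft/git-base')
outputs = captioner('cat.jpg', max_new_tokens=20)
print(outputs[0]['generated_text'])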
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def lowerCamelCase ( lowerCAmelCase : Optional[Any] ): """simple docstring""" __magic_name__ : Union[str, Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __magic_name__ : Any = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __magic_name__ : int = 4 __magic_name__ : List[Any] = 48 __magic_name__ : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __magic_name__ : Optional[Any] = [6, 6, 6, 6] __magic_name__ : Dict = 60 __magic_name__ : int = [6, 6, 6, 6] __magic_name__ : Any = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __magic_name__ : List[Any] = 4 __magic_name__ : List[Any] = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __magic_name__ : List[Any] = 1 __magic_name__ : Union[str, Any] = 1 __magic_name__ : List[Any] = 126 __magic_name__ : List[str] = 7 __magic_name__ : str = 255.0 __magic_name__ : Any = '' return config def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: __magic_name__ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __magic_name__ : int = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: __magic_name__ : Optional[Any] = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: __magic_name__ : List[str] = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: __magic_name__ : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __magic_name__ : List[str] = name.replace('attn' , 'attention.self' ) if "norm1" in name: __magic_name__ : int = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __magic_name__ : Union[str, Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __magic_name__ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __magic_name__ : Dict = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __magic_name__ : int = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __magic_name__ : Union[str, Any] = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __magic_name__ : str = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __magic_name__ : List[Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: __magic_name__ : Optional[int] = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": __magic_name__ : List[Any] = 'layernorm.weight' if name == "norm.bias": __magic_name__ : str = 'layernorm.bias' if "conv_first" in name: __magic_name__ : int = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __magic_name__ : Tuple = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __magic_name__ : List[str] = 
name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: __magic_name__ : Any = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: __magic_name__ : int = name.replace('upsample.2' , 'upsample.convolution_1' ) __magic_name__ : Union[str, Any] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": __magic_name__ : List[str] = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) __magic_name__ : Any = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: __magic_name__ : Optional[Any] = 'swin2sr.' + name return name def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ : Optional[Any] = orig_state_dict.pop(lowerCAmelCase ) if "qkv" in key: __magic_name__ : List[Any] = key.split('.' ) __magic_name__ : int = int(key_split[1] ) __magic_name__ : List[str] = int(key_split[4] ) __magic_name__ : List[str] = config.embed_dim if "weight" in key: __magic_name__ : int = val[:dim, :] __magic_name__ : Dict = val[dim : dim * 2, :] __magic_name__ : Optional[int] = val[-dim:, :] else: __magic_name__ : str = val[:dim] __magic_name__ : str = val[dim : dim * 2] __magic_name__ : List[str] = val[-dim:] pass else: __magic_name__ : Dict = val return orig_state_dict def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str ): """simple docstring""" __magic_name__ : Tuple = get_config(lowerCAmelCase ) __magic_name__ : int = SwinaSRForImageSuperResolution(lowerCAmelCase ) model.eval() __magic_name__ : str = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location='cpu' ) __magic_name__ : Optional[int] = convert_state_dict(lowerCAmelCase , lowerCAmelCase ) __magic_name__ , __magic_name__ : List[str] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) if len(lowerCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(lowerCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'Unexpected key {key} in state_dict' ) # verify values __magic_name__ : Optional[int] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' __magic_name__ : Any = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert('RGB' ) __magic_name__ : List[str] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __magic_name__ : Optional[int] = 126 if 'Jpeg' in checkpoint_url else 256 __magic_name__ : List[Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __magic_name__ : int = transforms(lowerCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: __magic_name__ : Dict = pixel_values[:, 0, :, :].unsqueeze(1 ) __magic_name__ : Dict = model(lowerCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __magic_name__ : Dict = torch.Size([1, 3, 512, 512] ) __magic_name__ : Optional[int] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] ) __magic_name__ : Optional[Any] = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in 
checkpoint_url: # TODO values didn't match exactly here __magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] ) __magic_name__ : List[str] = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __magic_name__ : List[str] = torch.Size([1, 3, 512, 512] ) __magic_name__ : Tuple = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] ) __magic_name__ : Optional[Any] = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCAmelCase , atol=1e-3 ) print('Looks ok!' ) __magic_name__ : Any = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } __magic_name__ : Optional[int] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCAmelCase ) if push_to_hub: model.push_to_hub(f'caidas/{model_name}' ) processor.push_to_hub(f'caidas/{model_name}' ) if __name__ == "__main__": lowerCAmelCase :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') lowerCAmelCase :Optional[int] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
331
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
331
1
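The Swin2SR script above follows the standard checkpoint-conversion recipe: infer a config from the checkpoint URL, rewrite every original parameter name onto the Hugging Face module layout, split fused qkv weights, load with `strict=False`, and assert on a logits slice. A minimal sketch of the renaming step, using one rule taken from the script and a dummy tensor purely for illustration:

import torch

def rename_key(name: str) -> str:
    # One rule from the converter; the real script chains many such replacements.
    return name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')

original = {'patch_embed.proj.weight': torch.zeros(96, 3, 4, 4)}
converted = {rename_key(k): v for k, v in original.items()}
print(list(converted))  # ['embeddings.patch_embeddings.projection.weight']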
'''simple docstring''' import argparse import os import re import packaging.version lowerCAmelCase :Any = '''examples/''' lowerCAmelCase :List[Any] = { '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } lowerCAmelCase :Dict = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } lowerCAmelCase :Optional[Any] = '''README.md''' def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ): """simple docstring""" with open(lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: __magic_name__ : List[Any] = f.read() __magic_name__ , __magic_name__ : Tuple = REPLACE_PATTERNS[pattern] __magic_name__ : Union[str, Any] = replace.replace('VERSION' , lowerCAmelCase ) __magic_name__ : List[str] = re_pattern.sub(lowerCAmelCase , lowerCAmelCase ) with open(lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" for folder, directories, fnames in os.walk(lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase , pattern='examples' ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any]=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if not patch: update_version_in_examples(lowerCAmelCase ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Union[str, Any] = '🤗 Transformers currently provides the following architectures' __magic_name__ : List[Any] = '1. Want to contribute a new model?' with open(lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: __magic_name__ : str = f.readlines() # Find the start of the list. __magic_name__ : Optional[Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __magic_name__ : Any = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' 
): __magic_name__ : Optional[Any] = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lowerCAmelCase ) def lowerCamelCase ( ): """simple docstring""" with open(REPLACE_FILES['init'] , 'r' ) as f: __magic_name__ : List[str] = f.read() __magic_name__ : Tuple = REPLACE_PATTERNS['init'][0].search(lowerCAmelCase ).groups()[0] return packaging.version.parse(lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any]=False ): """simple docstring""" __magic_name__ : int = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: __magic_name__ : Union[str, Any] = default_version.base_version elif patch: __magic_name__ : str = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __magic_name__ : str = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __magic_name__ : Tuple = input(f'Which version are you releasing? [{default_version}]' ) if len(lowerCAmelCase ) == 0: __magic_name__ : int = default_version print(f'Updating version to {version}.' ) global_version_update(lowerCAmelCase , patch=lowerCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def lowerCamelCase ( ): """simple docstring""" __magic_name__ : int = get_version() __magic_name__ : Union[str, Any] = f'{current_version.major}.{current_version.minor + 1}.0.dev0' __magic_name__ : Optional[int] = current_version.base_version # Check with the user we got that right. __magic_name__ : Tuple = input(f'Which version are we developing now? [{dev_version}]' ) if len(lowerCAmelCase ) == 0: __magic_name__ : Any = dev_version print(f'Updating version to {version}.' ) global_version_update(lowerCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase :str = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') lowerCAmelCase :List[str] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
331
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
331
1
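The release helper above bumps versions by pairing a multiline regex with a 'VERSION' template and substituting the target version into each tracked file. A self-contained sketch of that mechanism on an in-memory string instead of a real file; the version numbers are examples:

import re

# Pattern and template mirror the 'init' entry in REPLACE_PATTERNS above.
re_init = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'
code = '__version__ = "4.26.0.dev0"\n'
print(re_init.sub(template.replace('VERSION', '4.26.0'), code))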
'''simple docstring''' from collections.abc import Callable import numpy as np def lowerCamelCase ( lowerCAmelCase : Callable , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" __magic_name__ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) ) __magic_name__ : Tuple = np.zeros((n + 1,) ) __magic_name__ : Optional[Any] = ya __magic_name__ : Any = xa for k in range(lowerCAmelCase ): __magic_name__ : List[Any] = y[k] + step_size * ode_func(lowerCAmelCase , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
331
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
331
1
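The ODE solver above implements the explicit Euler update y_{k+1} = y_k + h * f(x_k, y_k). A quick self-contained check on dy/dx = y with y(0) = 1, whose exact solution is e^x; the function mirrors the one above, while the test problem is an illustration:

import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ~2.7048, approaching e = 2.71828... as step_size shrinks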
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[str] = DiTPipeline A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A_ : List[Any] = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A_ : Optional[Any] = False def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: torch.manual_seed(0 ) __magic_name__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_A , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=_A , ) __magic_name__ : Optional[Any] = AutoencoderKL() __magic_name__ : Union[str, Any] = DDIMScheduler() __magic_name__ : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def __lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : str=0 ) -> Any: if str(_A ).startswith('mps' ): __magic_name__ : Tuple = torch.manual_seed(_A ) else: __magic_name__ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[Any] = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : Optional[int] = 'cpu' __magic_name__ : List[str] = self.get_dummy_components() __magic_name__ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __magic_name__ : int = self.get_dummy_inputs(_A ) __magic_name__ : str = pipe(**_A ).images __magic_name__ : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __magic_name__ : int = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) __magic_name__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1E-3 ) def __lowerCAmelCase ( self : Any ) -> str: self._test_inference_batch_single_identical(relax_max_difference=_A , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : int ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Union[str, Any] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = torch.manual_seed(0 ) __magic_name__ : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) __magic_name__ : List[str] = ['vase', 
'umbrella', 'white shark', 'white wolf'] __magic_name__ : str = pipe.get_label_ids(_A ) __magic_name__ : Optional[Any] = pipe(_A , generator=_A , num_inference_steps=40 , output_type='np' ).images for word, image in zip(_A , _A ): __magic_name__ : List[str] = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: __magic_name__ : Optional[int] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) __magic_name__ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) __magic_name__ : Dict = ['vase', 'umbrella'] __magic_name__ : Optional[int] = pipe.get_label_ids(_A ) __magic_name__ : str = torch.manual_seed(0 ) __magic_name__ : Any = pipe(_A , generator=_A , num_inference_steps=25 , output_type='np' ).images for word, image in zip(_A , _A ): __magic_name__ : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
331
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
331
1
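Both test files above validate models by comparing a small slice of the output against hard-coded values within an absolute tolerance, and the DiT test additionally seeds a `torch.Generator` so sampling is reproducible. A small illustration of the tolerance check; the slice values echo the DeBERTa test but the perturbation is invented:

import torch

expected = torch.tensor([-0.5986, -0.8055, -0.8462])
actual = expected + 5e-5  # pretend model output, off by a tiny amount
assert torch.allclose(actual, expected, atol=1e-4)       # within tolerance
assert not torch.allclose(actual, expected, atol=1e-5)   # too strict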
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCAmelCase :Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[str] = ['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: __magic_name__ : Tuple = row __magic_name__ : str = col __magic_name__ : Optional[Any] = graph def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: # Checking all 8 elements surrounding nth element __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : Optional[int] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A ) def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : Any = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_A , _A , _A ) count += 1 return count
331
1
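The graph class above counts islands of 1-cells with an 8-directional depth-first flood fill over a visited matrix. The same algorithm with plain names, as a self-contained sketch; the grid is illustrative:

def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        seen[i][j] = True
        for di in (-1, 0, 1):          # visit all 8 neighbours
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                    dfs(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)              # flood one whole island
                count += 1
    return count

print(count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]))  # 2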
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily treat each mel bin as a "feature" so the
                # feature extractor pads spectrogram targets correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
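A minimal usage sketch for the processor above; the checkpoint name is one public example and loading it requires network access:

from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

# Text goes through the tokenizer; raw waveforms would go through the
# feature extractor via the `audio=` / `audio_target=` arguments instead.
inputs = processor(text="Hello, world!", return_tensors="pt")
print(inputs["input_ids"].shape)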
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowerCAmelCase :List[str] = logging.getLogger() def lowerCamelCase ( lowerCAmelCase : Optional[int] ): """simple docstring""" __magic_name__ : Dict = {} __magic_name__ : Dict = os.path.join(lowerCAmelCase , 'all_results.json' ) if os.path.exists(lowerCAmelCase ): with open(lowerCAmelCase , 'r' ) as f: __magic_name__ : List[Any] = json.load(lowerCAmelCase ) else: raise ValueError(f'can\'t find {path}' ) return results lowerCAmelCase :Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: import xla_spawn __magic_name__ : Any = self.get_auto_remove_tmp_dir() __magic_name__ : int = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(_A , 'argv' , _A ): __magic_name__ : Tuple = time() xla_spawn.main() __magic_name__ : str = time() __magic_name__ : List[Any] = get_results(_A ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import xla_spawn __magic_name__ : Dict = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(_A , 'argv' , _A ): xla_spawn.main()
"""
Find the area of the rectangular grid whose count of contained rectangles is
closest to the target (Project Euler problem 85). An a x b grid contains
T(a) * T(b) rectangles, where T(n) is the nth triangle number.
"""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
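The counting identity the search relies on can be sanity-checked directly; a 3 x 2 grid is the standard worked example and contains eighteen rectangles:

def rectangle_count(a: int, b: int) -> int:
    # Rectangles in an a x b grid: T(a) * T(b), with T(n) = n(n + 1) / 2
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)


assert rectangle_count(3, 2) == 18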
"""Fetch the top stories from Hacker News and render them as a markdown list."""
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
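The markdown formatting step can be checked without hitting the API; the story dicts below are stand-ins for real API responses:

fake_stories = [
    {"title": "Example story", "url": "https://example.com"},
    {"title": "Another story", "url": "https://example.org"},
]
markdown = "\n".join("* [{title}]({url})".format(**story) for story in fake_stories)
print(markdown)
# * [Example story](https://example.com)
# * [Another story](https://example.org)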
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
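A minimal generation sketch for the XGLM classes exported above; the checkpoint name is one public example and downloading it requires network access:

from transformers import XGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer("The capital of France is", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))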
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
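One behavior worth noting: str.split() with no separator collapses runs of whitespace, so irregular spacing is normalized rather than preserved:

print(reverse_words("  hello   world  "))  # -> "world hello"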
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
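The processor above matches the shape of the ConvNeXT image processor shipped in transformers (shortest-edge resizing with the crop_pct trick below 384 pixels); assuming that identification is right, the public ConvNextImageProcessor API can exercise the same code path:

import numpy as np
from PIL import Image
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 224})
image = Image.fromarray((np.random.rand(300, 400, 3) * 255).astype("uint8"))

# Below 384 the image is resized to shortest_edge / crop_pct, center-cropped,
# rescaled, and normalized; at 384 or larger it is simply warped.
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)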
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : int = DanceDiffusionPipeline A_ : List[str] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS A_ : List[str] = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } A_ : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS A_ : int = False A_ : Any = False def __lowerCAmelCase ( self : Tuple ) -> List[str]: torch.manual_seed(0 ) __magic_name__ : Dict = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_A , use_timestep_embedding=_A , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) __magic_name__ : int = IPNDMScheduler() __magic_name__ : Dict = { 'unet': unet, 'scheduler': scheduler, } return components def __lowerCAmelCase ( self : Tuple , _A : int , _A : Dict=0 ) -> Optional[Any]: if str(_A ).startswith('mps' ): __magic_name__ : List[Any] = torch.manual_seed(_A ) else: __magic_name__ : int = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : Optional[int] = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def __lowerCAmelCase ( self : Optional[int] ) -> Dict: __magic_name__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __magic_name__ : int = self.get_dummy_components() __magic_name__ : List[str] = DanceDiffusionPipeline(**_A ) __magic_name__ : Optional[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __magic_name__ : int = self.get_dummy_inputs(_A ) __magic_name__ : Union[str, Any] = pipe(**_A ) __magic_name__ : Tuple = output.audios __magic_name__ : List[str] = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) __magic_name__ : Optional[int] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __lowerCAmelCase ( self : int ) -> Optional[int]: return super().test_save_load_local() @skip_mps def __lowerCAmelCase ( self : Dict ) -> Optional[int]: return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def __lowerCAmelCase ( self : Any ) -> Any: return super().test_save_load_optional_components() @skip_mps def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: return super().test_attention_slicing_forward_pass() def __lowerCAmelCase ( self : List[str] ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Dict ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Optional[int] ) -> 
Optional[Any]: __magic_name__ : int = torch_device __magic_name__ : Dict = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) __magic_name__ : Any = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __magic_name__ : Any = torch.manual_seed(0 ) __magic_name__ : Dict = pipe(generator=_A , num_inference_steps=100 , audio_length_in_s=4.096 ) __magic_name__ : int = output.audios __magic_name__ : int = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) __magic_name__ : Any = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : str ) -> int: __magic_name__ : str = torch_device __magic_name__ : Any = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) __magic_name__ : Optional[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __magic_name__ : int = torch.manual_seed(0 ) __magic_name__ : List[Any] = pipe(generator=_A , num_inference_steps=100 , audio_length_in_s=4.096 ) __magic_name__ : Optional[Any] = output.audios __magic_name__ : str = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) __magic_name__ : List[str] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
"""Solve the Casimir force equation F = (h-bar * c * pi^2 * A) / (240 * d^4) for the missing quantity."""
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant h-bar, speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of h-bar : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")

    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
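A quick numerical check of the solver above; the plate area and separation are illustrative values, and exactly one argument (the unknown) must be zero:

print(casimir_force(force=0, area=4.0, distance=0.03))   # -> attractive force, in newtons
print(casimir_force(force=1e-10, area=100.0, distance=0))  # -> plate separation, in metres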
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class _lowerCamelCase : '''simple docstring''' A_ : Optional[int] = None A_ : Optional[jnp.ndarray] = None A_ : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def __lowerCAmelCase ( cls : Any ) -> Dict: return cls() @dataclass class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : jnp.ndarray A_ : jnp.ndarray A_ : KarrasVeSchedulerState class _lowerCamelCase ( lowercase__ , lowercase__ ): '''simple docstring''' @property def __lowerCAmelCase ( self : str ) -> List[Any]: return True @register_to_config def __init__( self : Union[str, Any] , _A : float = 0.02 , _A : float = 100 , _A : float = 1.007 , _A : float = 80 , _A : float = 0.05 , _A : float = 50 , ) -> int: pass def __lowerCAmelCase ( self : int ) -> Any: return KarrasVeSchedulerState.create() def __lowerCAmelCase ( self : Dict , _A : KarrasVeSchedulerState , _A : int , _A : Tuple = () ) -> KarrasVeSchedulerState: __magic_name__ : int = jnp.arange(0 , _A )[::-1].copy() __magic_name__ : Any = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_A , schedule=jnp.array(_A , dtype=jnp.floataa ) , timesteps=_A , ) def __lowerCAmelCase ( self : Any , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: __magic_name__ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: __magic_name__ : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) __magic_name__ : Any = random.split(_A , num=1 ) __magic_name__ : Optional[int] = self.config.s_noise * random.normal(key=_A , shape=sample.shape ) __magic_name__ : Any = sigma + gamma * sigma __magic_name__ : Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __lowerCAmelCase ( self : List[str] , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: __magic_name__ : Union[str, Any] = sample_hat + sigma_hat * model_output __magic_name__ : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat __magic_name__ : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A ) def __lowerCAmelCase ( self : Union[str, Any] , _A : KarrasVeSchedulerState , _A : jnp.ndarray , _A : float , _A : float , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , _A : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: __magic_name__ : Optional[Any] = sample_prev + sigma_prev * model_output __magic_name__ : Tuple = (sample_prev - pred_original_sample) / sigma_prev __magic_name__ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A ) def __lowerCAmelCase ( self : List[Any] , _A : KarrasVeSchedulerState , _A : Optional[Any] , _A : Any , _A : str ) -> 
List[str]: raise NotImplementedError()
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
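A minimal OCR sketch using the encoder-decoder class exported here; the checkpoint name is one public example, downloading it requires network access, and the image path is a placeholder:

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("handwritten_line.png").convert("RGB")  # any RGB image of a text line
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])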
class DisjointSet:
    """Union-find structure that also tracks per-set element counts and the largest set size."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge was performed."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
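A short usage example for the class above; four singleton sets are merged into one:

ds = DisjointSet([1, 1, 1, 1])
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(0, 2)
print(ds.max_set)                             # 4: all elements share one set
print(ds.get_parent(0) == ds.get_parent(3))   # True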
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Scores an image against free-form candidate labels using a CLIP-style image-text model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
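In practice this pipeline is reached through the pipeline() factory; the checkpoint and image URL below are public examples and require network access:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["a photo of a parrot", "a photo of a dog", "a photo of a car"],
)
print(predictions)  # list of {"score": ..., "label": ...} dicts, sorted by score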
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : str , _A : Any , _A : Union[str, Any]=13 , _A : Tuple=7 , _A : Optional[Any]=True , _A : Optional[int]=True , _A : List[str]=True , _A : str=True , _A : Tuple=99 , _A : Optional[Any]=32 , _A : Union[str, Any]=2 , _A : Any=4 , _A : List[Any]=37 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Tuple=16 , _A : List[Any]=2 , _A : str=0.02 , _A : Dict=3 , _A : Tuple=4 , _A : int=None , _A : Optional[int]=1000 , ) -> List[str]: __magic_name__ : Dict = parent __magic_name__ : Tuple = batch_size __magic_name__ : Any = seq_length __magic_name__ : Dict = is_training __magic_name__ : List[Any] = use_input_mask __magic_name__ : List[Any] = use_token_type_ids __magic_name__ : Any = use_labels __magic_name__ : List[Any] = vocab_size __magic_name__ : List[str] = hidden_size __magic_name__ : List[str] = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Optional[int] = intermediate_size __magic_name__ : Dict = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : List[str] = attention_probs_dropout_prob __magic_name__ : Dict = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Optional[int] = type_sequence_label_size __magic_name__ : Any = initializer_range __magic_name__ : int = num_labels __magic_name__ : Union[str, Any] = num_choices __magic_name__ : Tuple = scope __magic_name__ : Optional[Any] = range_bbox def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment __magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __magic_name__ : str = bbox[i, j, 3] __magic_name__ : int = bbox[i, j, 1] __magic_name__ : Dict = t if bbox[i, j, 2] < bbox[i, j, 0]: __magic_name__ : int = bbox[i, j, 2] __magic_name__ : Tuple = bbox[i, j, 0] __magic_name__ : Optional[int] = t __magic_name__ : Optional[int] = tf.convert_to_tensor(_A ) __magic_name__ : Any = None if self.use_input_mask: __magic_name__ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : int = None __magic_name__ : List[Any] = None __magic_name__ : Dict = None if self.use_labels: __magic_name__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : List[str] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : str , _A : Dict , _A : int , _A : int , _A : str , _A : Union[str, Any] , _A : Optional[Any] ) -> Tuple: __magic_name__ : Optional[Any] = TFLayoutLMModel(config=_A ) __magic_name__ : List[Any] = model(_A , _A , attention_mask=_A , token_type_ids=_A ) __magic_name__ : Any = model(_A , _A , token_type_ids=_A ) __magic_name__ : Dict = model(_A , _A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self : List[Any] , _A : str , _A : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : str , _A : Tuple ) -> List[str]: __magic_name__ : Tuple = TFLayoutLMForMaskedLM(config=_A ) __magic_name__ : Any = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Union[str, Any] , _A : Optional[int] , _A : Tuple , _A : Optional[int] , _A : Any , _A : str , _A : Optional[int] ) -> List[Any]: __magic_name__ : Tuple = self.num_labels __magic_name__ : Optional[Any] = TFLayoutLMForSequenceClassification(config=_A ) __magic_name__ : int = model(_A , _A , attention_mask=_A , token_type_ids=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : int , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : str , _A : Tuple , _A : List[Any] , _A : int ) -> List[Any]: __magic_name__ : List[str] = self.num_labels __magic_name__ : Optional[Any] = TFLayoutLMForTokenClassification(config=_A ) __magic_name__ : Optional[int] = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Any , _A : Dict , _A : List[Any] , _A : int , _A : Any , _A : Optional[int] , _A : str , _A : str , _A : List[Any] ) -> Optional[Any]: __magic_name__ : Tuple = TFLayoutLMForQuestionAnswering(config=_A ) __magic_name__ : Tuple = model(_A , _A , attention_mask=_A , token_type_ids=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Dict ) -> str: __magic_name__ : List[Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = 
config_and_inputs __magic_name__ : Any = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) A_ : str = ( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) A_ : int = False A_ : Any = True A_ : List[Any] = 10 def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Tuple = TFLayoutLMModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : Optional[int] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> str: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> int: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) @slow def __lowerCAmelCase ( self : Any ) -> Any: for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : Dict = TFLayoutLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: pass def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Union[str, Any] = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 __magic_name__ : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 __magic_name__ : List[Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 __magic_name__ : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) __magic_name__ : Dict = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : Dict = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass __magic_name__ : List[str] = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A ) # test the sequence output on [0, :3, :3] __magic_name__ : Dict = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1E-3 ) ) # test the pooled output on [1, :3] __magic_name__ : Dict = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _A , atol=1E-3 ) ) @slow def __lowerCAmelCase ( self : Optional[int] ) -> Tuple: # initialize model with randomly initialized sequence classification head __magic_name__ : Tuple = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = prepare_layoutlm_batch_inputs() # forward pass __magic_name__ : Optional[Any] = model( input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar __magic_name__ : int = outputs.loss __magic_name__ : Union[str, Any] = (2,) self.assertEqual(loss.shape , _A ) # test the shape of the logits __magic_name__ : List[str] = outputs.logits __magic_name__ : List[Any] = (2, 2) self.assertEqual(logits.shape , _A ) @slow def __lowerCAmelCase ( self : int ) -> int: # initialize model with randomly initialized token classification head __magic_name__ : Any = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , 
__magic_name__ : str = prepare_layoutlm_batch_inputs() # forward pass __magic_name__ : int = model( input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A ) # test the shape of the logits __magic_name__ : Tuple = outputs.logits __magic_name__ : int = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _A ) @slow def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: # initialize model with randomly initialized token classification head __magic_name__ : int = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple = prepare_layoutlm_batch_inputs() # forward pass __magic_name__ : List[str] = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A ) # test the shape of the logits __magic_name__ : Optional[Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _A ) self.assertEqual(outputs.end_logits.shape , _A )
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
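The hard-coded ID lists asserted throughout this file follow directly from ByT5's byte-level scheme: every UTF-8 byte maps to byte_value + 3 (IDs 0-2 are reserved for the pad, EOS, and unk tokens), and EOS (ID 1) is appended. A minimal sketch that reproduces the 'Unicode €.' expectation from above:

def byte_encode(text):
    # Shift each UTF-8 byte by 3 (ids 0..2 are <pad>/</s>/<unk>), then append </s> (id 1).
    return [b + 3 for b in text.encode("utf-8")] + [1]

assert byte_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]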
'''simple docstring''' from datetime import datetime as dt import os from github import Github LABELS_TO_EXEMPT = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def main(): """Close inactive stale issues and nudge quiet ones on huggingface/transformers.""" g = Github(os.environ['GITHUB_TOKEN'] ) repo = g.get_repo('huggingface/transformers' ) open_issues = repo.get_issues(state='open' ) for issue in open_issues: comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True ) last_comment = comments[0] if len(comments ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='closed' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
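Both branches of the script hinge on the same date arithmetic. A small, GitHub-free sketch of the staleness predicate used by the comment branch (the sample dates are made up):

from datetime import datetime

def is_stale(updated_at, created_at, now=None):
    # Stale when the issue is at least 30 days old and untouched for more than 23 days.
    now = now or datetime.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

print(is_stale(datetime(2023, 1, 1), datetime(2022, 11, 1), now=datetime(2023, 2, 1)))  # True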
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
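For intuition about the update()/reset semantics these assertions exercise, here is a tiny standalone tracker, not the library class: it advances through whichever candidate sequences still match the tokens seen so far, resets on a dead end, and reports completion once one full alternative has been generated.

class TinyDisjunctive:
    def __init__(self, seqs):
        self.seqs = [list(s) for s in seqs]
        self.current = []

    def update(self, token):
        self.current.append(token)
        n = len(self.current)
        live = [s for s in self.seqs if s[:n] == self.current]
        if not live:  # dead end: start over
            self.current = []
            return False
        return any(len(s) == n for s in live)  # one alternative fully matched?

dc = TinyDisjunctive([[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]])
for tok in (1, 2, 4):
    assert dc.update(tok) is False
assert dc.update(5) is True  # [1, 2, 4, 5] fulfilled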
'''simple docstring''' import qiskit def half_adder ( bit0 : int , bit1 : int ): """Build a quantum half adder, run it on the Aer simulator, and return the counts.""" simulator = qiskit.Aer.get_backend('aer_simulator' ) qc_ha = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bit0 == 1: qc_ha.x(0 ) if bit1 == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator job = qiskit.execute(qc_ha , simulator , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(qc_ha ) if __name__ == "__main__": lowerCAmelCase :Tuple = half_adder(1, 1) print(F'Half Adder Output Qubit Counts: {lowerCAmelCase}')
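As a classical cross-check for the circuit: qubit 2 carries the XOR (sum bit) and qubit 3 the AND (carry bit), so for inputs (1, 1) the 1000 shots should all land on the bitstring '10' (carry=1, sum=0; qiskit prints the classical bits in reverse order). The reference truth table:

# Classical truth table the simulator counts should match: sum = a XOR b, carry = a AND b.
for a in (0, 1):
    for b in (0, 1):
        print(f"{a} + {b} -> sum={a ^ b}, carry={a & b}")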
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
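Every failure case in this suite reduces to comparing a heading outline against the expected structure. A rough sketch of the outline-extraction half (a simplification for illustration, not ReadMe's actual implementation):

import re

def outline(readme):
    # Collect (level, title) pairs for every markdown heading in the file.
    return [(len(m.group(1)), m.group(2).strip())
            for m in re.finditer(r"^(#{1,6})\s+(.*)$", readme, flags=re.M)]

sample = "# Dataset Card for X\n## Table of Contents\n## Dataset Description\n### Dataset Summary\n"
print(outline(sample))
# [(1, 'Dataset Card for X'), (2, 'Table of Contents'), (2, 'Dataset Description'), (3, 'Dataset Summary')]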
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCAmelCase :str = logging.get_logger(__name__) lowerCAmelCase :List[str] = '''▁''' lowerCAmelCase :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase :str = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } lowerCAmelCase :int = { '''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4, } # fmt: off lowerCAmelCase :Any = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Any = VOCAB_FILES_NAMES A_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Any = PRETRAINED_VOCAB_FILES_MAP A_ : List[str] = ["""input_ids""", """attention_mask"""] A_ : List[int] = [] A_ : List[int] = [] def __init__( self : Dict , _A : Optional[Any] , _A : Any=None , _A : Dict=None , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : Optional[int]="<s>" , _A : Tuple="<unk>" , _A : Union[str, Any]="<pad>" , _A : Tuple="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : int , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __magic_name__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token __magic_name__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs __magic_name__ : Any = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_A , tgt_lang=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __magic_name__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) __magic_name__ : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __magic_name__ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __magic_name__ : Tuple = 1 __magic_name__ : List[Any] = len(self.sp_model ) __magic_name__ : int = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A ) } __magic_name__ : Any = {v: k for k, v in self.lang_code_to_id.items()} __magic_name__ : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __magic_name__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __magic_name__ : Optional[int] = src_lang if src_lang is not None else 'en_XX' __magic_name__ : str = self.lang_code_to_id[self._src_lang] __magic_name__ : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __lowerCAmelCase ( self : Any ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __lowerCAmelCase ( self : Any ) -> str: return self._src_lang @src_lang.setter def __lowerCAmelCase ( self : Dict , _A : str ) -> None: __magic_name__ : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ) -> Dict: __magic_name__ : Union[str, Any] = self.__dict__.copy() __magic_name__ : Tuple = None return state def __setstate__( self : List[Any] , _A : Dict ) -> None: __magic_name__ : Dict = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __magic_name__ : str = {} __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : int ) -> Dict: __magic_name__ : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : List[str] , _A : str ) -> List[str]: return self.sp_model.encode(_A , out_type=_A ) def __lowerCAmelCase ( self : str , _A : str ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __magic_name__ : Optional[int] = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self : List[str] , _A : int ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self : Dict , _A : Union[str, Any] ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] __magic_name__ : Optional[int] = '' __magic_name__ : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_A ) + token __magic_name__ : Union[str, Any] = True __magic_name__ : Union[str, Any] = [] else: current_sub_tokens.append(_A ) __magic_name__ : str = False out_string += self.sp_model.decode(_A ) return out_string.strip() def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(_A ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __magic_name__ : Tuple = os.path.join( _A , (filename_prefix + 
'-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , 'wb' ) as fi: __magic_name__ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def __lowerCAmelCase ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) __magic_name__ : Tuple = [1] * len(self.prefix_tokens ) __magic_name__ : str = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_A )) + suffix_ones return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones def __lowerCAmelCase ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : Dict ) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __magic_name__ : Optional[Any] = src_lang __magic_name__ : str = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) __magic_name__ : Union[str, Any] = self.convert_tokens_to_ids(_A ) __magic_name__ : List[Any] = tgt_lang_id return inputs def __lowerCAmelCase ( self : int , _A : List[str] , _A : str = "en_XX" , _A : Optional[List[str]] = None , _A : str = "ro_RO" , **_A : Dict , ) -> BatchEncoding: __magic_name__ : Tuple = src_lang __magic_name__ : str = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang ) def __lowerCAmelCase ( self : List[str] ) -> str: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowerCAmelCase ( self : int , _A : str ) -> None: __magic_name__ : Optional[Any] = self.lang_code_to_id[src_lang] __magic_name__ : List[str] = [self.cur_lang_code_id] __magic_name__ : int = [self.eos_token_id] def __lowerCAmelCase ( self : str , _A : str ) -> None: __magic_name__ : List[Any] = self.lang_code_to_id[tgt_lang] __magic_name__ : List[Any] = [self.cur_lang_code_id] __magic_name__ : List[Any] = [self.eos_token_id]
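The set_src_lang_special_tokens/set_tgt_lang_special_tokens pair at the end implements the MBART-50 input format: the language code is prepended and </s> appended around the raw token ids. A minimal sketch; the id 250004 for 'en_XX' is an illustrative assumption, not a value from this file:

def wrap_source(token_ids, lang_code_id, eos_id=2):
    # [lang_code] X [eos], as built by build_inputs_with_special_tokens above.
    return [lang_code_id] + list(token_ids) + [eos_id]

print(wrap_source([47, 11, 9], 250004))  # [250004, 47, 11, 9, 2]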
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
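The num_attention_heads / 2 in the attention-shape assertions reflects ConvBERT's mixed attention: with the default head_ratio of 2, half of the heads are handled by span-based dynamic convolution, so only the remaining half appear as attention maps. A quick shape check under that assumption (the sizes are illustrative):

num_attention_heads, head_ratio, batch, seq_len = 4, 2, 2, 7
attn_heads = num_attention_heads // head_ratio  # heads left for self-attention
print((batch, attn_heads, seq_len, seq_len))  # expected attention shape: (2, 2, 7, 7)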
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Dict ) -> List[Any]: __magic_name__ : Optional[Any] = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __magic_name__ : Union[str, Any] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __magic_name__ : Tuple = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above __magic_name__ : Any = tf_top_k_top_p_filtering(_A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __magic_name__ : List[Any] = output[output != -float('inf' )] __magic_name__ : Any = tf.cast( tf.where(tf.not_equal(_A , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_A , _A , rtol=1E-12 ) tf.debugging.assert_equal(_A , _A ) @require_tf class _lowerCamelCase ( unittest.TestCase , lowercase__ ): '''simple docstring''' if is_tf_available(): A_ : Optional[Any] = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: # TF-only test: tf.saved_model export __magic_name__ : Any = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __magic_name__ : Tuple = 2 __magic_name__ : Tuple = 2 class _lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : Optional[int] , _A : Optional[int] ) -> int: super(_A , self ).__init__() __magic_name__ : Any = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=_A , ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : List[Any] ) -> Tuple: __magic_name__ : Optional[int] = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} __magic_name__ : Optional[Any] = [[2, 0], [102, 103]] __magic_name__ : List[str] = [[1, 0], [1, 1]] __magic_name__ : int = DummyModel(model=_A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving} ) __magic_name__ : Optional[Any] = tf.saved_model.load(_A ).signatures['serving_default'] for batch_size in range(1 , len(_A ) + 1 ): __magic_name__ : List[Any] = { 'input_ids': tf.constant(dummy_input_ids[:batch_size] ), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ), } __magic_name__ : Optional[Any] = serving_func(**_A )['sequences'] __magic_name__ : List[Any] = test_model.generate(**_A , max_new_tokens=_A ) tf.debugging.assert_equal(_A , _A ) @slow def __lowerCAmelCase ( self : Tuple ) -> Tuple: # TF-only test: tf.saved_model export __magic_name__ : str = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __magic_name__ : Dict = 1 __magic_name__ : Optional[Any] = 2 class _lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : Any , _A : Tuple ) -> Tuple: super(_A , self 
).__init__() __magic_name__ : Union[str, Any] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=_A , ) def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : List[Any] = self.model.generate( input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , ) return {"sequences": outputs["sequences"]} __magic_name__ : int = [[2], [102, 103]] __magic_name__ : List[str] = [[1], [1, 1]] __magic_name__ : List[str] = DummyModel(model=_A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_A , _A , signatures={'serving_default': dummy_model.serving} ) __magic_name__ : Optional[int] = tf.saved_model.load(_A ).signatures['serving_default'] for input_row in range(len(_A ) ): __magic_name__ : List[Any] = { 'input_ids': tf.constant([dummy_input_ids[input_row]] ), 'attention_mask': tf.constant([dummy_attention_masks[input_row]] ), } __magic_name__ : Optional[int] = serving_func(**_A )['sequences'] __magic_name__ : Tuple = test_model.generate(**_A , max_new_tokens=_A ) tf.debugging.assert_equal(_A , _A ) @slow @require_tensorflow_text def __lowerCAmelCase ( self : int ) -> List[Any]: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_A ) class _lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Union[str, Any] ) -> Dict: super().__init__() __magic_name__ : str = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_A , 'spiece.model' ) , 'rb' ).read() ) __magic_name__ : Any = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def __lowerCAmelCase ( self : Union[str, Any] , _A : Dict , *_A : Any , **_A : Tuple ) -> Any: __magic_name__ : Dict = self.tokenizer.tokenize(_A ) __magic_name__ , __magic_name__ : Dict = text.pad_model_inputs( _A , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __magic_name__ : str = self.model.generate(input_ids=_A , attention_mask=_A ) return self.tokenizer.detokenize(_A ) __magic_name__ : Union[str, Any] = CompleteSentenceTransformer() __magic_name__ : Any = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) __magic_name__ : Union[str, Any] = complete_model(_A ) __magic_name__ : Dict = tf.keras.Model(_A , _A ) keras_model.save(_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: # Has PT equivalent: this test relies on random sampling __magic_name__ : List[str] = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } __magic_name__ : str = 14 __magic_name__ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __magic_name__ : List[str] = 'Hello, my dog is cute and' __magic_name__ : Optional[int] = tokenizer(_A , return_tensors='tf' ) __magic_name__ : List[str] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __magic_name__ : Optional[int] = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) __magic_name__ : Tuple = model.generate(**_A , eos_token_id=_A , **_A ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __magic_name__ : Optional[Any] = [638, 198] with tf.device(':/CPU:0' ): 
tf.random.set_seed(0 ) __magic_name__ : Optional[int] = model.generate(**_A , eos_token_id=_A , **_A ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple: # Has PT equivalent: ample use of framework-specific code __magic_name__ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) __magic_name__ : Dict = 'Hugging Face is a technology company based in New York and Paris.' __magic_name__ : Tuple = bart_tokenizer(_A , return_tensors='tf' ).input_ids __magic_name__ : int = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) __magic_name__ : str = bart_model.generate(_A ).numpy() class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple , _A : Tuple , _A : Any=None , **_A : Tuple ) -> int: return super().call(_A , **_A ) __magic_name__ : str = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) __magic_name__ : List[str] = bart_model.generate(_A , foo='bar' ).numpy() self.assertTrue(np.array_equal(_A , _A ) ) class _lowerCamelCase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def __lowerCAmelCase ( self : int , _A : List[Any] , **_A : Any ) -> str: return super().call(_A , **_A ) __magic_name__ : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared ) __magic_name__ : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __magic_name__ : Any = bart_model.generate(_A ).numpy() with self.assertRaises(_A ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_A , foo='bar' )
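# Added sketch (assumptions flagged in comments): the export tests above wrap
# `model.generate` in a tf.function with a fixed input signature so it round-trips
# through tf.saved_model. The toy module below shows the same save/load pattern
# without downloading a checkpoint; `serving` appends one token as a stand-in for
# real generation.
import tempfile

import tensorflow as tf


class ToyGenerateWrapper(tf.Module):
    @tf.function(input_signature=(tf.TensorSpec((None, 2), tf.int32, name='input_ids'),))
    def serving(self, input_ids):
        new_token = tf.ones_like(input_ids[:, :1])  # pretend-generated token
        return {'sequences': tf.concat([input_ids, new_token], axis=-1)}


wrapper = ToyGenerateWrapper()
with tempfile.TemporaryDirectory() as tmp_dir:
    tf.saved_model.save(wrapper, tmp_dir, signatures={'serving_default': wrapper.serving})
    serving_func = tf.saved_model.load(tmp_dir).signatures['serving_default']
    sequences = serving_func(input_ids=tf.constant([[2, 0]], dtype=tf.int32))['sequences']
    assert sequences.shape == (1, 3)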
331
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
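# Added cross-check (plain numpy, no faiss required): the nearest-neighbour assertions
# above follow directly from METRIC_INNER_PRODUCT — querying ones(5) against rows
# i * ones(5) must rank the largest multiplier (row 29) first.
import numpy as np

vectors = np.ones((30, 5), dtype=np.float32) * np.arange(30).reshape(-1, 1)
query = np.ones(5, dtype=np.float32)
scores = vectors @ query
assert int(np.argmax(scores)) == 29  # mirrors examples['filename'][0] == 'my_name-train_29'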
331
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowerCAmelCase :Optional[int] = None lowerCAmelCase :Any = logging.get_logger(__name__) lowerCAmelCase :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase :Any = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase :int = { '''moussaKam/mbarthez''': 1_0_2_4, '''moussaKam/barthez''': 1_0_2_4, '''moussaKam/barthez-orangesum-title''': 1_0_2_4, } lowerCAmelCase :List[str] = '''▁''' class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = VOCAB_FILES_NAMES A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP A_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["""input_ids""", """attention_mask"""] A_ : List[Any] = BarthezTokenizer def __init__( self : Any , _A : str=None , _A : Dict=None , _A : Any="<s>" , _A : Tuple="</s>" , _A : Any="</s>" , _A : Union[str, Any]="<s>" , _A : Any="<unk>" , _A : int="<pad>" , _A : Optional[Any]="<mask>" , **_A : Tuple , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it __magic_name__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( _A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , **_A , ) __magic_name__ : int = vocab_file __magic_name__ : Any = False if not self.vocab_file else True def __lowerCAmelCase ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __magic_name__ : Dict = [self.cls_token_id] __magic_name__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: __magic_name__ : Optional[Any] = [self.sep_token_id] __magic_name__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self : Dict , _A : str , _A : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
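# Added sketch (placeholder IDs, not the real vocabulary): BARThez follows the
# RoBERTa-style pair layout built above — <s> A </s></s> B </s> — so the special
# tokens add exactly four IDs to a sequence pair, and the token type IDs are all zero.
cls_id, sep_id = 0, 2
tokens_a, tokens_b = [10, 11], [20, 21, 22]
pair = [cls_id] + tokens_a + [sep_id] + [sep_id] + tokens_b + [sep_id]
assert len(pair) == len(tokens_a) + len(tokens_b) + 4
token_type_ids = [0] * len(pair)  # create_token_type_ids_from_sequences returns all zeros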
331
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ['log', 'progress_bar', 'preds']:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg)
        if not save_generations:
            return
        if 'preds' in metrics:
            generations = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(generations)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
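# Added sketch of the parameter accounting used by `count_trainable_parameters` and
# `on_train_start` above: filter on requires_grad, then sum np.prod over the shapes.
# The tiny Linear layer here is illustrative, not the Lightning module.
import numpy as np
import torch

layer = torch.nn.Linear(4, 3)       # 12 weights + 3 biases
layer.bias.requires_grad_(False)    # frozen parameters drop out of the count
trainable = filter(lambda p: p.requires_grad, layer.parameters())
assert sum(np.prod(p.size()) for p in trainable) == 12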
331
1
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() lowerCAmelCase :List[str] = logging.get_logger('''transformers.models.speecht5''') def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] ): """simple docstring""" hf_model.apply_weight_norm() __magic_name__ : List[str] = checkpoint['input_conv.weight_g'] __magic_name__ : Tuple = checkpoint['input_conv.weight_v'] __magic_name__ : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): __magic_name__ : str = checkpoint[f'upsamples.{i}.1.weight_g'] __magic_name__ : List[Any] = checkpoint[f'upsamples.{i}.1.weight_v'] __magic_name__ : Union[str, Any] = checkpoint[f'upsamples.{i}.1.bias'] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __magic_name__ : List[str] = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g'] __magic_name__ : Optional[int] = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v'] __magic_name__ : int = checkpoint[f'blocks.{i}.convs1.{j}.1.bias'] __magic_name__ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g'] __magic_name__ : Optional[int] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v'] __magic_name__ : Union[str, Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.bias'] __magic_name__ : Tuple = checkpoint['output_conv.1.weight_g'] __magic_name__ : List[str] = checkpoint['output_conv.1.weight_v'] __magic_name__ : Optional[Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple=None , lowerCAmelCase : Union[str, Any]=None , ): """simple docstring""" if config_path is not None: __magic_name__ : List[Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCAmelCase ) else: __magic_name__ : Union[str, Any] = SpeechTaHifiGanConfig() __magic_name__ : List[Any] = SpeechTaHifiGan(lowerCAmelCase ) __magic_name__ : Dict = torch.load(lowerCAmelCase ) load_weights(orig_checkpoint['model']['generator'] , lowerCAmelCase , lowerCAmelCase ) __magic_name__ : List[str] = np.load(lowerCAmelCase ) __magic_name__ : Optional[int] = stats[0].reshape(-1 ) __magic_name__ : List[Any] = stats[1].reshape(-1 ) __magic_name__ : Optional[Any] = torch.from_numpy(lowerCAmelCase ).float() __magic_name__ : Optional[Any] = torch.from_numpy(lowerCAmelCase ).float() model.save_pretrained(lowerCAmelCase ) if repo_id: print('Pushing to the hub...' 
        )
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
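# Added sketch of the weight-norm round trip the converter relies on: while weight
# norm is applied, each conv exposes (weight_g, weight_v) pairs matching the
# checkpoint keys copied above; remove_weight_norm folds them back into a plain
# weight before the model is saved. The Conv1d sizes are illustrative.
import torch
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(torch.nn.Conv1d(2, 2, kernel_size=3))
assert hasattr(conv, 'weight_g') and hasattr(conv, 'weight_v')
conv.weight_g.data.fill_(1.0)  # stands in for copying a checkpoint tensor
remove_weight_norm(conv)       # folds g/v back into conv.weight
assert not hasattr(conv, 'weight_g')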
331
'''simple docstring'''


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
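# Added cross-check (standalone, assumed equivalent): the same count computed as an
# iterative dynamic program over the UK coin denominations. For a 5p target there
# are four combinations (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), matching solution(5).
def count_ways(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target
    for coin in coins:
        for t in range(coin, target + 1):
            ways[t] += ways[t - coin]
    return ways[target]


assert count_ways(5) == 4 == solution(5)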
331
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase :int = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Optional[Any] = """beit""" def __init__( self : Optional[Any] , _A : Any=8192 , _A : int=768 , _A : Tuple=12 , _A : Tuple=12 , _A : str=3072 , _A : List[Any]="gelu" , _A : int=0.0 , _A : Dict=0.0 , _A : int=0.02 , _A : str=1E-12 , _A : int=224 , _A : Union[str, Any]=16 , _A : Dict=3 , _A : Optional[Any]=False , _A : int=False , _A : int=False , _A : List[Any]=False , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : List[Any]=True , _A : int=[3, 5, 7, 11] , _A : Tuple=[1, 2, 3, 6] , _A : Union[str, Any]=True , _A : Optional[int]=0.4 , _A : Union[str, Any]=256 , _A : Tuple=1 , _A : Dict=False , _A : int=255 , **_A : Any , ) -> Dict: super().__init__(**_A ) __magic_name__ : Optional[int] = vocab_size __magic_name__ : int = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Any = intermediate_size __magic_name__ : Union[str, Any] = hidden_act __magic_name__ : int = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : Any = initializer_range __magic_name__ : List[str] = layer_norm_eps __magic_name__ : Optional[int] = image_size __magic_name__ : Union[str, Any] = patch_size __magic_name__ : Dict = num_channels __magic_name__ : Dict = use_mask_token __magic_name__ : List[str] = use_absolute_position_embeddings __magic_name__ : str = use_relative_position_bias __magic_name__ : Optional[Any] = use_shared_relative_position_bias __magic_name__ : Dict = layer_scale_init_value __magic_name__ : Any = drop_path_rate __magic_name__ : List[str] = use_mean_pooling # decode head attributes (semantic segmentation) __magic_name__ : Dict = out_indices __magic_name__ : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) __magic_name__ : Optional[Any] = use_auxiliary_head __magic_name__ : Optional[Any] = auxiliary_loss_weight __magic_name__ : Any = auxiliary_channels __magic_name__ : str = auxiliary_num_convs __magic_name__ : Optional[Any] = auxiliary_concat_input __magic_name__ : str = semantic_loss_ignore_index class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : List[str] = version.parse("""1.11""" ) @property def __lowerCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowerCAmelCase ( self : Tuple ) -> float: return 1E-4
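# Added usage sketch (requires transformers): the defaults set in __init__ above give
# the standard BEiT-base geometry — a 224x224 image split into 16x16 patches yields a
# 14x14 grid of patch tokens.
from transformers import BeitConfig

config = BeitConfig()
assert config.image_size // config.patch_size == 14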
331
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
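# Added standalone sketch (simplified; the real requires_backends checks importlib
# availability instead of always raising): the dummy classes above exist only to
# raise a helpful ImportError when flax/transformers are missing, and the DummyObject
# metaclass makes even plain attribute access trigger the check.
class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith('_'):
            return super().__getattribute__(key)
        raise ImportError(f'{cls.__name__} requires the backends {cls._backends}')


class SomeFlaxPipeline(metaclass=DummyObject):  # hypothetical placeholder name
    _backends = ['flax', 'transformers']


try:
    SomeFlaxPipeline.from_pretrained  # attribute access alone is enough to raise
except ImportError as exc:
    assert 'flax' in str(exc)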
331
1
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
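# Added cross-check (pure python): the IDs asserted in these tests follow ByT5's byte
# scheme — id = utf8_byte + 3, with offsets 0..2 reserved for pad/eos/unk and the eos
# id (1) appended — so 'Unicode €.' reproduces the expected list exactly.
ids = [b + 3 for b in 'Unicode €.'.encode('utf-8')] + [1]
assert ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]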
331
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
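# Added usage sketch (assumptions: network access and a default image-to-text
# checkpoint resolvable by `pipeline`; the image URL comes from the transformers test
# fixtures and is illustrative):
from transformers import pipeline

captioner = pipeline('image-to-text')
print(captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png'))
# expected shape of the result: [{'generated_text': '...'}]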
331
1
"""Prim's minimum spanning tree, backed by a min-priority queue with decrease-key."""
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap index of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap index of the left child of the node at `position`
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap index of the right child of the node at `position`
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue of (element, weight) pairs stored as a binary heap."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodeb_pos: int) -> None:
        # Swap the nodes at the given positions and keep the position map in sync
        node_a = self.heap[nodea_pos][0]
        node_b = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[node_a] = nodeb_pos
        self.position_map[node_b] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node_a: T, node_b: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node_a)
        self.add_node(node_b)
        self.connections[node_a][node_b] = weight
        self.connections[node_b][node_a] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm; return (distance, parent) maps describing the MST."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
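A minimal usage sketch for the classes restored above; the expected output was checked by hand for this toy graph:

# Build a small undirected weighted graph and extract its minimum spanning tree.
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)

dist, parent = prims_algo(graph)
print(dist)    # {'a': 0, 'b': 3, 'c': 5}
print(parent)  # {'a': None, 'b': 'a', 'c': 'a'} -- MST edges a-b (3) and a-c (5)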
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
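The `configure_optimizers` hook above applies the standard decay/no-decay parameter split before building the optimizer. A standalone sketch of the same pattern, using torch's own AdamW instead of the deprecated `transformers.AdamW` that the module imports:

import torch
from torch.optim import AdamW


def build_optimizer(model: torch.nn.Module, lr: float = 5e-5, weight_decay: float = 0.01) -> AdamW:
    # Biases and LayerNorm weights are conventionally exempt from weight decay.
    no_decay = ("bias", "LayerNorm.weight")
    grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    return AdamW(grouped_parameters, lr=lr)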
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def lowerCamelCase ( lowerCAmelCase : Dict ): """simple docstring""" __magic_name__ : Optional[int] = torch.exp(lowerCAmelCase ) __magic_name__ : Tuple = torch.sum(lowerCAmelCase , dim=1 ) # sum of exp(x_i) __magic_name__ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(lowerCAmelCase ) - B / A class _lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] ) -> str: super().__init__() __magic_name__ : Optional[int] = config.output_attentions __magic_name__ : Dict = config.output_hidden_states __magic_name__ : List[str] = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] ) __magic_name__ : Dict = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] ) __magic_name__ : Optional[Any] = [-1 for _ in range(config.num_hidden_layers )] def __lowerCAmelCase ( self : Optional[Any] , _A : int ) -> Any: if (type(_A ) is float) or (type(_A ) is int): for i in range(len(self.early_exit_entropy ) ): __magic_name__ : Union[str, Any] = x else: __magic_name__ : List[str] = x def __lowerCAmelCase ( self : Optional[int] , _A : Tuple ) -> Tuple: __magic_name__ : Optional[Any] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __lowerCAmelCase ( self : str , _A : str , _A : Tuple=None , _A : List[Any]=None , _A : Any=None , _A : str=None , ) -> str: __magic_name__ : Union[str, Any] = () __magic_name__ : List[str] = () __magic_name__ : Optional[Any] = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __magic_name__ : str = all_hidden_states + (hidden_states,) __magic_name__ : Union[str, Any] = layer_module( _A , _A , head_mask[i] , _A , _A ) __magic_name__ : Any = layer_outputs[0] if self.output_attentions: __magic_name__ : Any = all_attentions + (layer_outputs[1],) __magic_name__ : Any = (hidden_states,) if self.output_hidden_states: __magic_name__ : Dict = current_outputs + (all_hidden_states,) if self.output_attentions: __magic_name__ : Optional[Any] = current_outputs + (all_attentions,) __magic_name__ : List[Any] = self.highway[i](_A ) # logits, pooled_output if not self.training: __magic_name__ : List[Any] = highway_exit[0] __magic_name__ : Any = entropy(_A ) __magic_name__ : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __magic_name__ : Optional[Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __magic_name__ : Union[str, Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(_A , i + 1 ) else: __magic_name__ : Tuple = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __magic_name__ : int = all_hidden_states + (hidden_states,) __magic_name__ : List[str] = (hidden_states,) if self.output_hidden_states: __magic_name__ : Tuple = outputs + (all_hidden_states,) if self.output_attentions: __magic_name__ : str = outputs + (all_attentions,) __magic_name__ : Dict = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), 
all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , lowercase__ , ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : str , _A : Dict ) -> Tuple: super().__init__(_A ) __magic_name__ : Optional[int] = config __magic_name__ : Dict = BertEmbeddings(_A ) __magic_name__ : Any = DeeBertEncoder(_A ) __magic_name__ : str = BertPooler(_A ) self.init_weights() def __lowerCAmelCase ( self : Optional[int] ) -> str: self.encoder.init_highway_pooler(self.pooler ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: return self.embeddings.word_embeddings def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] ) -> Dict: __magic_name__ : Optional[int] = value def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] ) -> Dict: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(_A ) @add_start_docstrings_to_model_forward(_A ) def __lowerCAmelCase ( self : Any , _A : Optional[int]=None , _A : Optional[Any]=None , _A : Any=None , _A : int=None , _A : List[str]=None , _A : List[Any]=None , _A : List[Any]=None , _A : Optional[int]=None , ) -> Dict: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: __magic_name__ : int = input_ids.size() elif inputs_embeds is not None: __magic_name__ : Tuple = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) __magic_name__ : Dict = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __magic_name__ : List[Any] = torch.ones(_A , device=_A ) if encoder_attention_mask is None: __magic_name__ : Optional[Any] = torch.ones(_A , device=_A ) if token_type_ids is None: __magic_name__ : Union[str, Any] = torch.zeros(_A , dtype=torch.long , device=_A ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__magic_name__ : torch.Tensor = self.get_extended_attention_mask(_A , _A , _A ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __magic_name__ : List[str] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __magic_name__ : List[str] = encoder_attention_mask[:, None, None, :] __magic_name__ : Tuple = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __magic_name__ : Dict = (1.0 - encoder_extended_attention_mask) * -1_0000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __magic_name__ : Dict = self.get_head_mask(_A , self.config.num_hidden_layers ) __magic_name__ : str = self.embeddings( input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A ) __magic_name__ : str = self.encoder( _A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , ) __magic_name__ : List[Any] = encoder_outputs[0] __magic_name__ : str = self.pooler(_A ) __magic_name__ : str = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[str] , _A : Union[str, Any] , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : Dict = message __magic_name__ : Optional[int] = exit_layer # start from 1! class _lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , _A : Dict ) -> Optional[int]: super().__init__() __magic_name__ : int = BertPooler(_A ) __magic_name__ : Optional[Any] = nn.Dropout(config.hidden_dropout_prob ) __magic_name__ : List[Any] = nn.Linear(config.hidden_size , config.num_labels ) def __lowerCAmelCase ( self : Optional[int] , _A : Dict ) -> str: # Pooler __magic_name__ : int = encoder_outputs[0] __magic_name__ : Optional[Any] = self.pooler(_A ) # "return" pooler_output # BertModel __magic_name__ : int = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __magic_name__ : int = bmodel_output[1] __magic_name__ : Optional[int] = self.dropout(_A ) __magic_name__ : str = self.classifier(_A ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , lowercase__ , ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : Any ) -> Dict: super().__init__(_A ) __magic_name__ : Optional[Any] = config.num_labels __magic_name__ : Union[str, Any] = config.num_hidden_layers __magic_name__ : List[Any] = DeeBertModel(_A ) __magic_name__ : List[Any] = nn.Dropout(config.hidden_dropout_prob ) __magic_name__ : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_A ) def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any]=None , _A : int=None , _A : List[str]=None , _A : Optional[int]=None , _A : str=None , _A : Optional[int]=None , _A : int=None , _A : int=-1 , _A : Dict=False , ) -> int: __magic_name__ : Optional[int] = self.num_layers try: __magic_name__ : Any = self.bert( _A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __magic_name__ : Optional[Any] = outputs[1] __magic_name__ : str = self.dropout(_A ) __magic_name__ : Union[str, Any] = self.classifier(_A ) __magic_name__ : str = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __magic_name__ : Dict = e.message __magic_name__ : List[Any] = e.exit_layer __magic_name__ : Union[str, Any] = outputs[0] if not self.training: __magic_name__ : Tuple = entropy(_A ) __magic_name__ : Optional[int] = [] __magic_name__ : str = [] if labels is not None: if self.num_labels == 1: # We are doing regression __magic_name__ : List[Any] = MSELoss() __magic_name__ : List[str] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __magic_name__ : List[str] = CrossEntropyLoss() __magic_name__ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits __magic_name__ : int = [] for highway_exit in outputs[-1]: __magic_name__ : List[str] = highway_exit[0] if not self.training: highway_logits_all.append(_A ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __magic_name__ : List[str] = MSELoss() __magic_name__ : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __magic_name__ : Optional[int] = CrossEntropyLoss() __magic_name__ : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_A ) if train_highway: __magic_name__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __magic_name__ : str = (loss,) + outputs if not self.training: __magic_name__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __magic_name__ : Any = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
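The two full-loop tests above follow the standard DDPM sampling recipe. A minimal standalone loop with diffusers' DDPMScheduler, where random noise stands in for a trained UNet's prediction:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # run a shortened inference schedule

sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # a real pipeline calls its UNet here
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample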
"""Project Euler: find the area of the grid whose rectangle count is nearest to two million."""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    """An a x b grid contains T(a) * T(b) rectangles, with T(n) the n-th triangle number."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0

    b_estimate: float  # an estimate of b, using the quadratic formula
    b_floor: int  # the largest integer less than b_estimate
    b_ceil: int  # the smallest integer greater than b_estimate
    triangle_b_first_guess: int  # the triangle number corresponding to b_floor
    triangle_b_second_guess: int  # the triangle number corresponding to b_ceil

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
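The solution depends on the identity that an a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n + 1) / 2 is the n-th triangle number. A small brute-force cross-check of that identity; the helper names are illustrative:

def count_rectangles(a: int, b: int) -> int:
    # Count sub-rectangles directly by enumerating every width and height.
    return sum((a - i + 1) * (b - j + 1) for i in range(1, a + 1) for j in range(1, b + 1))


def triangle(n: int) -> int:
    return n * (n + 1) // 2


# The 3 x 2 grid from the problem statement contains exactly 18 rectangles.
assert count_rectangles(3, 2) == triangle(3) * triangle(2) == 18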
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
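The tests above drive IFInpaintingPipeline with dummy components. For context, a hedged sketch of real inference: the checkpoint name is DeepFloyd's published one (the weights are gated behind a license), and the image URLs are placeholders:

import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import load_image

pipe = IFInpaintingPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # keeps peak GPU memory manageable

image = load_image("https://example.com/photo.png")  # placeholder URL
mask_image = load_image("https://example.com/mask.png")  # placeholder URL
result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=image,
    mask_image=mask_image,
    num_inference_steps=50,
).images[0]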
"""GPTBigCode model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
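A short usage sketch for the restored configuration class through the public `transformers` API (assuming a version that ships GPTBigCode, 4.30 or later):

from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

# Instantiate a deliberately tiny model from a customized config.
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128, n_positions=256)
model = GPTBigCodeForCausalLM(config)
print(model.config.multi_query)  # True by default for GPTBigCode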
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
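The integration test above pins exact hidden-state values for microsoft/deberta-base. A minimal inference sketch against the same public checkpoint:

import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])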
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets lowerCAmelCase :List[Any] = datasets.logging.get_logger(__name__) lowerCAmelCase :Any = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' lowerCAmelCase :int = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' lowerCAmelCase :Any = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' lowerCAmelCase :Union[str, Any] = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCamelCase ( datasets.Metric ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , ) def __lowerCAmelCase ( self : str , _A : Optional[Any] ) -> Tuple: # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. 
' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' ) __magic_name__ : Dict = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: __magic_name__ : Tuple = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __magic_name__ : Dict = self.config_name.upper() else: raise KeyError( F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer __magic_name__ : Tuple = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __magic_name__ : List[Any] = score.BleurtScorer(os.path.join(_A , _A ) ) def __lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : Optional[int] = self.scorer.score(references=_A , candidates=_A ) return {"scores": scores}
"""Count the islands (8-connected groups of 1s) in a boolean matrix via DFS."""


class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
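A usage sketch for the class above; the grid is a classic 8-connectivity example and the expected count was verified by hand:

# Count connected groups of 1s (including diagonal neighbours) in a small grid.
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
matrix = Matrix(5, 5, grid)
print(matrix.count_islands())  # 5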
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
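# A short sketch of what the lazy structure above buys the caller, assuming a
# transformers version that ships Falcon: importing the config is cheap and does
# not pull in torch; model classes are resolved only on first attribute access
# (and raise a clear error if torch is unavailable).
from transformers import FalconConfig

config = FalconConfig()  # default hyperparameters, no torch import triggered
# from transformers import FalconModel  # torch would only be imported here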
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """simple docstring"""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
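# A usage sketch: feeding a short passage (such as the commented-out sample text
# above) to calculate_prob prints the first-order entropy, the second-order
# entropy, and their difference, each rounded to one decimal place.
calculate_prob("Had repulsive dashwoods suspicion sincerity but advantage now him.")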
'''simple docstring'''
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
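# A sanity check of the identity the search above relies on: an a x b grid
# contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th
# triangle number. Project Euler 85 gives the 3 x 2 grid as containing 18.
def rectangle_count(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)


assert rectangle_count(3, 2) == 18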
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
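# A minimal consumer-side sketch of attaching the task template above to dataset
# features; the column names "article" and "highlights" are illustrative.
from datasets import Features, Value

task = Summarization(text_column="article", summary_column="highlights")
features = Features({"article": Value("string"), "highlights": Value("string")})
print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}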
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
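# A hedged usage sketch of the pipeline above via the high-level factory; the
# checkpoint name and image URL are illustrative and require network access.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
outputs = captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
print(outputs)  # e.g. [{'generated_text': '...'}]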
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
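# A usage sketch for the image processor above (it closely mirrors the upstream
# ConvNeXT processor, so the class name here is an assumption). With
# shortest_edge < 384, the image is resized to shortest_edge / crop_pct and then
# center-cropped back to shortest_edge.
import numpy as np
from PIL import Image

processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)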
'''simple docstring''' import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : Optional[int] = torch.load(lowerCAmelCase , map_location='cpu' ) __magic_name__ : int = chkpt['model'] # We have the base model one level deeper than the original XLM repository __magic_name__ : str = {} for k, v in state_dict.items(): if "pred_layer" in k: __magic_name__ : Dict = v else: __magic_name__ : Optional[int] = v __magic_name__ : int = chkpt['params'] __magic_name__ : Tuple = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase , (torch.FloatTensor, numpy.ndarray) )} __magic_name__ : str = chkpt['dico_word2id'] __magic_name__ : Optional[Any] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()} # Save pytorch-model __magic_name__ : Any = pytorch_dump_folder_path + '/' + WEIGHTS_NAME __magic_name__ : Optional[int] = pytorch_dump_folder_path + '/' + CONFIG_NAME __magic_name__ : Union[str, Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file'] print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(lowerCAmelCase , lowerCAmelCase ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(lowerCAmelCase , indent=2 ) + '\n' ) print(f'Save vocab file to {pytorch_config_dump_path}' ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(lowerCAmelCase , indent=2 ) + '\n' ) if __name__ == "__main__": lowerCAmelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase :Union[str, Any] = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
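# A hedged invocation sketch for the conversion script above; the script filename
# and checkpoint path are illustrative:
#
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted
#
# Afterwards the dump folder holds pytorch_model.bin, config.json and vocab.json,
# ready for XLMModel.from_pretrained("./xlm-converted").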
'''simple docstring'''
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
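# A worked example for the function above: two 4 cm^2 (4e-4 m^2) plates one
# micrometre apart. Passing force=0 asks the function to solve for the force.
result = casimir_force(force=0, area=4e-4, distance=1e-6)
print(result)  # {'force': ...}, roughly 5e-7 N for these values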
'''simple docstring'''
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Any = None A_ : Union[str, Any] = BloomTokenizerFast A_ : Optional[Any] = BloomTokenizerFast A_ : int = True A_ : Any = False A_ : Tuple = """tokenizer_file""" A_ : Union[str, Any] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: super().setUp() __magic_name__ : List[Any] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Optional[Any] , **_A : Union[str, Any] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ : Optional[int] = self.get_rust_tokenizer() __magic_name__ : Optional[int] = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] __magic_name__ : Tuple = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] __magic_name__ : str = tokenizer.batch_encode_plus(_A )['input_ids'] self.assertListEqual(_A , _A ) __magic_name__ : List[Any] = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int]=6 ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): __magic_name__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __magic_name__ : List[str] = 'This is a simple input' __magic_name__ : int = ['This is a simple input 1', 'This is a simple input 2'] __magic_name__ : Union[str, Any] = ('This is a simple input', 'This is a pair') __magic_name__ : List[Any] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(_A , max_length=_A ) tokenizer_r.encode_plus(_A , max_length=_A ) tokenizer_r.batch_encode_plus(_A , max_length=_A ) tokenizer_r.encode(_A , max_length=_A ) tokenizer_r.batch_encode_plus(_A , max_length=_A ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) __magic_name__ : Tuple = None # Hotfixing padding = None self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='max_length' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='max_length' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='max_length' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='max_length' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='max_length' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='max_length' , ) def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : Optional[int] = self.get_rust_tokenizer() __magic_name__ : Tuple = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_A ) __magic_name__ : 
List[str] = next(iter(_A ) )['premise'] # pick up one data __magic_name__ : Any = list(sample_data.values() ) __magic_name__ : Any = list(map(tokenizer.encode , _A ) ) __magic_name__ : Tuple = [tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) for x in output_tokens] self.assertListEqual(_A , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations


class BoyerMooreSearch:
    '''simple docstring'''

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
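# A hedged usage sketch of the pipeline above; the CLIP checkpoint and image URL
# are illustrative and require network access.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
outputs = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["a photo of a parrot", "a photo of a dog"],
)
print(outputs)  # [{'score': ..., 'label': ...}] sorted by descending score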
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase :str = logging.get_logger(__name__) lowerCAmelCase :Optional[Any] = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase :List[str] = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } lowerCAmelCase :List[str] = {'''facebook/blenderbot-3B''': 1_2_8} class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : List[str] = VOCAB_FILES_NAMES A_ : int = PRETRAINED_VOCAB_FILES_MAP A_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[int] = ["""input_ids""", """attention_mask"""] A_ : Union[str, Any] = BlenderbotTokenizer def __init__( self : Optional[Any] , _A : Dict=None , _A : str=None , _A : str=None , _A : Optional[int]="replace" , _A : str="<s>" , _A : int="</s>" , _A : List[Any]="</s>" , _A : List[Any]="<s>" , _A : Tuple="<unk>" , _A : str="<pad>" , _A : Dict="<mask>" , _A : Optional[Any]=False , _A : str=True , **_A : Optional[Any] , ) -> Optional[int]: super().__init__( _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , ) __magic_name__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _A ) != add_prefix_space: __magic_name__ : Tuple = getattr(_A , pre_tok_state.pop('type' ) ) __magic_name__ : Optional[int] = add_prefix_space __magic_name__ : str = pre_tok_class(**_A ) __magic_name__ : Tuple = add_prefix_space __magic_name__ : int = 'post_processor' __magic_name__ : str = getattr(self.backend_tokenizer , _A , _A ) if tokenizer_component_instance: __magic_name__ : List[str] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __magic_name__ : Union[str, Any] = tuple(state['sep'] ) if "cls" in state: __magic_name__ : Tuple = tuple(state['cls'] ) __magic_name__ : str = False if state.get('add_prefix_space' , _A ) != add_prefix_space: __magic_name__ : Optional[int] = add_prefix_space __magic_name__ : Union[str, Any] = True if state.get('trim_offsets' , _A ) != trim_offsets: __magic_name__ : List[str] = trim_offsets __magic_name__ : Optional[int] = True if changes_to_apply: __magic_name__ : List[str] = getattr(_A , state.pop('type' ) ) __magic_name__ : str = component_class(**_A ) setattr(self.backend_tokenizer , _A , _A ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCAmelCase ( self : List[str] ) -> str: if self._mask_token is 
None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self : Optional[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value __magic_name__ : Dict = value def __lowerCAmelCase ( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> BatchEncoding: __magic_name__ : Any = kwargs.get('is_split_into_words' , _A ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_A , **_A ) def __lowerCAmelCase ( self : Optional[int] , *_A : Tuple , **_A : Union[str, Any] ) -> BatchEncoding: __magic_name__ : Any = kwargs.get('is_split_into_words' , _A ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*_A , **_A ) def __lowerCAmelCase ( self : Any , _A : str , _A : Optional[str] = None ) -> Tuple[str]: __magic_name__ : str = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def __lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: __magic_name__ : Optional[Any] = [self.sep_token_id] __magic_name__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> Tuple: return token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self : List[str] , _A : "Conversation" ) -> List[int]: __magic_name__ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(_A ) __magic_name__ : int = ' '.join(_A ) __magic_name__ : Any = self.encode(_A ) if len(_A ) > self.model_max_length: __magic_name__ : Any = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
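# A brief usage sketch for the fast tokenizer above; note that
# build_inputs_with_special_tokens appends only the EOS token, matching
# Blenderbot's expected input format. Downloading the checkpoint needs network access.
from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer("Hello, how are you?").input_ids
print(tokenizer.decode(ids))  # ends with the </s> EOS token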
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
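# --- Illustrative sketch (not part of the test file above) -----------------
# The hard-coded id lists in the ByT5 tests follow from a simple rule: the
# tokenizer reserves three special ids (pad=0, eos=1, unk=2), so every UTF-8
# byte b maps to token id b + 3, with </s> (id 1) appended. A minimal
# re-derivation of the "Unicode €." expectation used above:
def byt5_byte_ids(text: str, offset: int = 3, eos_id: int = 1) -> list:
    # UTF-8 bytes shifted past the special tokens, followed by </s>
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

assert byt5_byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]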
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging lowerCAmelCase :Any = logging.get_logger(__name__) lowerCAmelCase :int = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase :Optional[Any] = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } lowerCAmelCase :Optional[int] = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) lowerCAmelCase :List[Any] = 0 lowerCAmelCase :int = 1 lowerCAmelCase :Any = 2 lowerCAmelCase :Dict = 3 lowerCAmelCase :Dict = 4 class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : int = PRETRAINED_VOCAB_FILES_MAP A_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : int = """left""" def __init__( self : List[str] , _A : Dict , _A : Any=False , _A : List[str]=True , _A : Dict=False , _A : int="<s>" , _A : int="</s>" , _A : Tuple="<unk>" , _A : Union[str, Any]="<sep>" , _A : Dict="<pad>" , _A : List[Any]="<cls>" , _A : Optional[int]="<mask>" , _A : List[Any]=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : str , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __magic_name__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token __magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __magic_name__ : str = 3 __magic_name__ : Any = do_lower_case __magic_name__ : Dict = remove_space __magic_name__ : int = keep_accents __magic_name__ : Dict = vocab_file __magic_name__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) @property def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: return len(self.sp_model ) def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: __magic_name__ : str = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.__dict__.copy() __magic_name__ : str = None return state def __setstate__( self : Any , _A : List[Any] ) -> Dict: __magic_name__ : Optional[Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __magic_name__ : Any = {} __magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] ) -> Optional[int]: if self.remove_space: __magic_name__ : Dict = ' '.join(inputs.strip().split() ) else: __magic_name__ : Optional[Any] = inputs __magic_name__ : int = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: __magic_name__ : Optional[int] = unicodedata.normalize('NFKD' , _A ) __magic_name__ : Tuple = ''.join([c for c in outputs if not unicodedata.combining(_A )] ) if self.do_lower_case: __magic_name__ : Union[str, Any] = outputs.lower() return 
outputs def __lowerCAmelCase ( self : List[Any] , _A : str ) -> List[str]: __magic_name__ : Tuple = self.preprocess_text(_A ) __magic_name__ : Union[str, Any] = self.sp_model.encode(_A , out_type=_A ) __magic_name__ : Union[str, Any] = [] for piece in pieces: if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): __magic_name__ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __magic_name__ : Optional[Any] = cur_pieces[1:] else: __magic_name__ : Dict = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_A ) else: new_pieces.append(_A ) return new_pieces def __lowerCAmelCase ( self : str , _A : Tuple ) -> Optional[Any]: return self.sp_model.PieceToId(_A ) def __lowerCAmelCase ( self : List[Any] , _A : Union[str, Any] ) -> Optional[Any]: return self.sp_model.IdToPiece(_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = ''.join(_A ).replace(_A , ' ' ).strip() return out_string def __lowerCAmelCase ( self : Tuple , _A : List[int] , _A : bool = False , _A : bool = None , _A : bool = True , **_A : str , ) -> str: __magic_name__ : List[Any] = kwargs.pop('use_source_tokenizer' , _A ) __magic_name__ : List[Any] = self.convert_ids_to_tokens(_A , skip_special_tokens=_A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 __magic_name__ : Union[str, Any] = [] __magic_name__ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) __magic_name__ : int = [] sub_texts.append(_A ) else: current_sub_text.append(_A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens __magic_name__ : Tuple = ''.join(_A ) __magic_name__ : List[str] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __magic_name__ : List[Any] = self.clean_up_tokenization(_A ) return clean_text else: return text def __lowerCAmelCase ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: __magic_name__ : Optional[Any] = [self.sep_token_id] __magic_name__ : str = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowerCAmelCase ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1] return ([0] * len(_A )) + [1, 1] def __lowerCAmelCase ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: __magic_name__ : str = [self.sep_token_id] __magic_name__ : Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowerCAmelCase ( self : str , _A : str , _A : Optional[str] = None ) -> 
Tuple[str]: if not os.path.isdir(_A ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __magic_name__ : int = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , 'wb' ) as fi: __magic_name__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
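# --- Illustrative sketch (not part of the tokenizer file above) ------------
# XLNet, unlike BERT, places its special tokens at the END of the sequence;
# this mirrors the special-token builder defined above (named
# build_inputs_with_special_tokens in the upstream tokenizer). The ids below
# are hypothetical placeholders, not the real vocabulary ids.
SEP_ID, CLS_ID = 4, 3  # placeholder ids for <sep> and <cls>

def xlnet_single(token_ids):
    return token_ids + [SEP_ID] + [CLS_ID]

def xlnet_pair(token_ids_a, token_ids_b):
    return token_ids_a + [SEP_ID] + token_ids_b + [SEP_ID] + [CLS_ID]

assert xlnet_single([10, 11]) == [10, 11, 4, 3]
assert xlnet_pair([10], [20, 21]) == [10, 4, 20, 21, 4, 3]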
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
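# --- Illustrative sketch (simplified; not part of the test file above) -----
# The tests above exercise DisjunctiveConstraint's contract: the constraint is
# fulfilled as soon as ANY of the candidate token sequences has been generated,
# and update() walks a trie of the candidates, reporting (stepped, completed,
# reset). This toy walker mirrors only the success/failure outcome, not the
# reset bookkeeping of the real class.
def fulfills_disjunction(candidates, tokens):
    seq = []
    for t in tokens:
        seq.append(t)
        # the real update() would reset here instead of failing outright
        if not any(c[: len(seq)] == seq for c in candidates):
            return False
    return any(seq == c for c in candidates)

assert fulfills_disjunction([[1, 2, 3], [1, 2, 4]], [1, 2, 4]) is True
assert fulfills_disjunction([[1, 2, 3], [1, 2, 4]], [1, 2]) is False  # mid-branch, not complete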
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black lowerCAmelCase :Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowerCAmelCase :List[Any] = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : Union[str, Any] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) ) __magic_name__ : Any = self.transformer_dir shutil.copy( os.path.join(_A , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , ) def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: __magic_name__ : str = 'src/transformers' shutil.rmtree(self.transformer_dir ) def __lowerCAmelCase ( self : str , _A : Any , _A : Any , _A : Union[str, Any] , _A : int=None ) -> Union[str, Any]: __magic_name__ : int = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: __magic_name__ : List[str] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result __magic_name__ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) __magic_name__ : str = black.format_str(_A , mode=_A ) __magic_name__ : List[Any] = os.path.join(self.transformer_dir , 'new_code.py' ) with open(_A , 'w' , newline='\n' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , 'r' ) as f: self.assertTrue(f.read() , _A ) def __lowerCAmelCase ( self : Any ) -> List[Any]: __magic_name__ : List[str] = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' ) self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: # Base copy consistency self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , _A , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , _A ) , ) # Copy consistency with a really long name __magic_name__ : List[str] = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( F'# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('Bert' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , _A , overwrite_result=re.sub('Bert' , 'TestModel' , _A ) , ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: __magic_name__ : Tuple = check_copies.LOCALIZED_READMES['README_zh-hans.md'] __magic_name__ : Dict = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.' ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),' ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**' ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders' ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang' ' Luong, Quoc V. Le, Christopher D. Manning.' ) __magic_name__ : Optional[Any] = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __magic_name__ : Optional[Any] = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.' ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文' ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. 
**[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自' ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather' ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,' ' Christopher D. Manning 发布。\n' ) __magic_name__ , __magic_name__ : str = check_copies.convert_to_localized_md( _A , _A , localized_readme['format_model_list'] ) self.assertFalse(_A ) self.assertEqual(_A , _A ) __magic_name__ , __magic_name__ : Union[str, Any] = check_copies.convert_to_localized_md( _A , _A , localized_readme['format_model_list'] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(_A ) __magic_name__ : Dict = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.' ) __magic_name__ : List[Any] = ( '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and' ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __magic_name__ : Dict = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __magic_name__ , __magic_name__ : Union[str, Any] = check_copies.convert_to_localized_md( _A , _A , localized_readme['format_model_list'] ) # Check if the model link is synchronized. self.assertEqual(_A , _A )
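# --- Illustrative sketch (simplified; assumed behaviour of check_copies) ----
# The "# Copied from ... with Bert->TestModel" comments tested above ask the
# checker to apply the `old->new` pattern to the reference code before
# comparing. The real utils/check_copies.py also handles lowercase and
# snake_case variants of the pattern; this stripped-down version shows only
# the core substitution idea.
import re

def apply_copied_from_pattern(reference_code: str, pattern: str) -> str:
    old, new = pattern.split("->")
    return re.sub(old, new, reference_code)

assert (
    apply_copied_from_pattern("class BertLMPredictionHead(nn.Module):", "Bert->TestModel")
    == "class TestModelLMPredictionHead(nn.Module):"
)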
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
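# --- Illustrative usage sketch (not part of the test file above) -----------
# Minimal driver for the validator the parametrized tests exercise, reusing
# the module-level fixtures: README_CORRECT is defined above, and
# `example_yaml_structure` stands in for the schema loaded at the top of this
# file (the commented-out fixture hints at that original name).
from datasets.utils.readme import ReadMe

readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()  # raises (a ValueError upstream) when sections or YAML markers are missing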
'''simple docstring''' from __future__ import annotations lowerCAmelCase :Any = '''#''' class _lowerCamelCase : '''simple docstring''' def __init__( self : int ) -> None: __magic_name__ : dict = {} def __lowerCAmelCase ( self : Any , _A : str ) -> None: __magic_name__ : Tuple = self._trie for char in text: if char not in trie: __magic_name__ : List[Any] = {} __magic_name__ : Any = trie[char] __magic_name__ : Tuple = True def __lowerCAmelCase ( self : Any , _A : str ) -> tuple | list: __magic_name__ : str = self._trie for char in prefix: if char in trie: __magic_name__ : Optional[int] = trie[char] else: return [] return self._elements(_A ) def __lowerCAmelCase ( self : Any , _A : dict ) -> tuple: __magic_name__ : Optional[int] = [] for c, v in d.items(): __magic_name__ : Any = [' '] if c == END else [(c + s) for s in self._elements(_A )] result.extend(_A ) return tuple(_A ) lowerCAmelCase :Dict = Trie() lowerCAmelCase :Optional[int] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def lowerCamelCase ( lowerCAmelCase : str ): """simple docstring""" __magic_name__ : List[str] = trie.find_word(lowerCAmelCase ) return tuple(string + word for word in suffixes ) def lowerCamelCase ( ): """simple docstring""" print(autocomplete_using_trie('de' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
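# --- Illustrative sketch (assumes the obscured assignments above follow the
# standard reference implementation, i.e. insert_word finishes with
# trie[END] = True). Each character nests a dict, and END ('#') marks a
# complete word:
t = Trie()
t.insert_word("do")
t.insert_word("dog")
assert t._trie == {"d": {"o": {"#": True, "g": {"#": True}}}}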
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
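# For orientation, a minimal standalone sketch of the inference path the slow
# integration test above exercises; it assumes TensorFlow and the
# "YituTech/conv-bert-base" checkpoint are available.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # expected (1, 6, 768), matching the test above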
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' A_ : List[Any] = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Union[str, Any] , _A : int , _A : int , _A : Optional[int] = None , _A : int = 50257 , _A : int = 1024 , _A : int = 768 , _A : int = 12 , _A : int = 12 , _A : Optional[int] = None , _A : str = "gelu_new" , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 1E-5 , _A : float = 0.02 , _A : bool = True , _A : bool = True , _A : bool = False , _A : bool = False , ) -> List[Any]: super().__init__() __magic_name__ : Union[str, Any] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and' F' `n_embd`: {n_embd} are not equal.' ) __magic_name__ : List[str] = prefix_inner_dim __magic_name__ : Union[str, Any] = prefix_hidden_dim __magic_name__ : Optional[int] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __magic_name__ : str = ( nn.Linear(self.prefix_hidden_dim , _A ) if self.prefix_hidden_dim is not None else nn.Identity() ) __magic_name__ : Optional[int] = GPTaConfig( vocab_size=_A , n_positions=_A , n_embd=_A , n_layer=_A , n_head=_A , n_inner=_A , activation_function=_A , resid_pdrop=_A , embd_pdrop=_A , attn_pdrop=_A , layer_norm_epsilon=_A , initializer_range=_A , scale_attn_weights=_A , use_cache=_A , scale_attn_by_inverse_layer_idx=_A , reorder_and_upcast_attn=_A , ) __magic_name__ : int = GPTaLMHeadModel(_A ) def __lowerCAmelCase ( self : Any , _A : torch.Tensor , _A : torch.Tensor , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , ) -> str: __magic_name__ : int = self.transformer.transformer.wte(_A ) __magic_name__ : List[str] = self.encode_prefix(_A ) __magic_name__ : Any = self.decode_prefix(_A ) __magic_name__ : List[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __magic_name__ : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __magic_name__ : int = torch.cat((dummy_token, input_ids) , dim=1 ) __magic_name__ : Union[str, Any] = self.transformer(inputs_embeds=_A , labels=_A , attention_mask=_A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self : Any , _A : int , _A : torch.device ) -> torch.Tensor: return torch.zeros(_A , self.prefix_length , dtype=torch.intaa , device=_A ) def __lowerCAmelCase ( self : List[Any] , _A : Dict ) -> List[Any]: return self.encode_prefix(_A ) @torch.no_grad() def __lowerCAmelCase ( self : str , _A : Optional[int] , _A : Tuple , _A : List[str] ) -> Union[str, Any]: __magic_name__ : Union[str, Any] = torch.split(_A , 1 , dim=0 ) __magic_name__ : str = [] __magic_name__ : Optional[Any] = [] for feature in features: __magic_name__ : int = self.decode_prefix(feature.to(_A ) ) # back to the clip feature # Only support beam search for now __magic_name__ , __magic_name__ : Any = self.generate_beam( input_embeds=_A , device=_A , eos_token_id=_A ) generated_tokens.append(output_tokens[0] ) 
generated_seq_lengths.append(seq_lengths[0] ) __magic_name__ : List[str] = torch.stack(_A ) __magic_name__ : Optional[int] = torch.stack(_A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self : Dict , _A : List[str]=None , _A : Any=None , _A : int=None , _A : int = 5 , _A : int = 67 , _A : float = 1.0 , _A : Optional[int] = None , ) -> List[Any]: __magic_name__ : List[Any] = eos_token_id __magic_name__ : int = None __magic_name__ : Optional[Any] = None __magic_name__ : Dict = torch.ones(_A , device=_A , dtype=torch.int ) __magic_name__ : int = torch.zeros(_A , device=_A , dtype=torch.bool ) if input_embeds is not None: __magic_name__ : Optional[int] = input_embeds else: __magic_name__ : Optional[int] = self.transformer.transformer.wte(_A ) for i in range(_A ): __magic_name__ : Optional[int] = self.transformer(inputs_embeds=_A ) __magic_name__ : Dict = outputs.logits __magic_name__ : List[str] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __magic_name__ : str = logits.softmax(-1 ).log() if scores is None: __magic_name__ , __magic_name__ : Tuple = logits.topk(_A , -1 ) __magic_name__ : Optional[int] = generated.expand(_A , *generated.shape[1:] ) __magic_name__ , __magic_name__ : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __magic_name__ : Optional[int] = next_tokens else: __magic_name__ : str = tokens.expand(_A , *tokens.shape[1:] ) __magic_name__ : int = torch.cat((tokens, next_tokens) , dim=1 ) else: __magic_name__ : Tuple = -float(np.inf ) __magic_name__ : Any = 0 __magic_name__ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __magic_name__ : int = scores_sum / seq_lengths[:, None] __magic_name__ , __magic_name__ : List[str] = scores_sum_average.view(-1 ).topk(_A , -1 ) __magic_name__ : Any = next_tokens // scores_sum.shape[1] __magic_name__ : Tuple = seq_lengths[next_tokens_source] __magic_name__ : Union[str, Any] = next_tokens % scores_sum.shape[1] __magic_name__ : Tuple = next_tokens.unsqueeze(1 ) __magic_name__ : Union[str, Any] = tokens[next_tokens_source] __magic_name__ : List[str] = torch.cat((tokens, next_tokens) , dim=1 ) __magic_name__ : List[Any] = generated[next_tokens_source] __magic_name__ : List[str] = scores_sum_average * seq_lengths __magic_name__ : Union[str, Any] = is_stopped[next_tokens_source] __magic_name__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __magic_name__ : Any = torch.cat((generated, next_token_embed) , dim=1 ) __magic_name__ : Union[str, Any] = is_stopped + next_tokens.eq(_A ).squeeze() if is_stopped.all(): break __magic_name__ : Optional[int] = scores / seq_lengths __magic_name__ : Optional[int] = scores.argsort(descending=_A ) # tokens tensors are already padded to max_seq_length __magic_name__ : Optional[int] = [tokens[i] for i in order] __magic_name__ : List[str] = torch.stack(_A , dim=0 ) __magic_name__ : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
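# The beam search above ranks candidates by length-normalized score: it keeps a
# running sum of token log-probabilities and divides by the generated length.
# A toy illustration of that normalization step (standalone numbers, not the class above):
import torch

scores_sum = torch.tensor([-2.0, -2.4])  # summed log-probs of two beams
seq_lengths = torch.tensor([2.0, 4.0])   # tokens generated so far
average = scores_sum / seq_lengths
print(average)  # tensor([-1.0000, -0.6000]); the raw sum prefers beam 0, the average prefers beam 1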
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
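# A hedged sketch of the add_faiss_index flow these tests exercise, assuming
# `faiss` and `datasets` are installed (metric and data mirror the tests above):
import faiss
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"filename": [f"my_name-train_{i}" for i in range(30)]})
ds = ds.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
ds.add_faiss_index(column="vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
print(examples["filename"][0])  # "my_name-train_29", the row with the largest inner product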
'''simple docstring'''
# Algorithm for the pigeonhole sorting


def pigeonhole_sort(a: list[int]) -> None:
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
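# Quick in-place check of the sorter above; for this input the holes array
# spans the values 2 through 8.
data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
assert data == [2, 3, 4, 6, 7, 8, 8]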
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt' __magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , 'a+' ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __magic_name__ : Optional[Any] = metrics[key] if isinstance(_A , torch.Tensor ): __magic_name__ : Tuple = val.item() __magic_name__ : int = F'{key}: {val:.6f}\n' writer.write(_A ) if not save_generations: return if "preds" in metrics: __magic_name__ : str = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_A ) @rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple: try: __magic_name__ : str = pl_module.model.model.num_parameters() except AttributeError: __magic_name__ : List[str] = pl_module.model.num_parameters() __magic_name__ : List[Any] = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , 'test' ) @rank_zero_only def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
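# A rough wiring example for these callbacks. The helper names below
# (get_checkpoint_callback, get_early_stopping_callback, and the logging
# callback class) are inferred from the upstream seq2seq utilities this file
# mirrors; the definitions above are anonymized, so treat the names as assumptions.
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback(output_dir="outputs", metric="bleu")
early_stop_cb = get_early_stopping_callback(metric="bleu", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])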
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
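# The _LazyModule indirection above defers importing heavy submodules until an
# attribute is first accessed. A simplified standalone sketch of the idea (the
# real class in transformers.utils also handles module specs, docs, and errors):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._symbol_to_submodule[attr]}", self.__name__)
        return getattr(module, attr)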
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
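# The recursive helper chain above counts the coin combinations one
# denomination at a time. An equivalent bottom-up dynamic programming version,
# added here for comparison, runs in O(len(coins) * amount):
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]


assert solution_dp(200) == 73682  # known answer for the 200p case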
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :List[Any] = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[str] = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, 
FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCAmelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
'''simple docstring'''
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Replace the matched key with its two one-bit extensions, widening codes when needed."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using Lempel-Ziv coding."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the binary length of the source file to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pad the bit string to whole bytes and write it to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
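# Quick check of compress_data on a raw bit string, without touching the
# filesystem; the lexicon starts as {"0": "0", "1": "1"} and grows as longer
# patterns are seen.
bits = "0100100101"
print(compress_data(bits))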
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
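# End users typically reach this class through the pipeline factory. A hedged
# usage example; the checkpoint name and image path below are placeholders, not
# values taken from this file.
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")
print(captioner("some_image.png"))  # e.g. [{'generated_text': 'a cat sitting on a couch'}]
print(captioner("some_image.png", prompt="a photography of"))  # conditional generation (GIT/Pix2Struct)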
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowerCAmelCase :Optional[int] = logging.get_logger(__name__) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" __magic_name__ : Optional[Any] = set() __magic_name__ : List[Any] = [] def parse_line(lowerCAmelCase : int ): for line in fp: if isinstance(lowerCAmelCase , lowerCAmelCase ): __magic_name__ : Union[str, Any] = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(lowerCAmelCase ) > 0: __magic_name__ : int = '\n'.join(lowerCAmelCase ) # Only keep the warnings specified in `targets` if any(f': {x}: ' in warning for x in targets ): selected_warnings.add(lowerCAmelCase ) buffer.clear() continue else: __magic_name__ : Optional[int] = line.strip() buffer.append(lowerCAmelCase ) if from_gh: for filename in os.listdir(lowerCAmelCase ): __magic_name__ : int = os.path.join(lowerCAmelCase , lowerCAmelCase ) if not os.path.isdir(lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with open(lowerCAmelCase ) as fp: parse_line(lowerCAmelCase ) else: try: with zipfile.ZipFile(lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(lowerCAmelCase ) as fp: parse_line(lowerCAmelCase ) except Exception: logger.warning( f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : List[Any] = set() __magic_name__ : Dict = [os.path.join(lowerCAmelCase , lowerCAmelCase ) for p in os.listdir(lowerCAmelCase ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCAmelCase , lowerCAmelCase ) ) return selected_warnings if __name__ == "__main__": def lowerCamelCase ( lowerCAmelCase : str ): """simple docstring""" return values.split(',' ) lowerCAmelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') # optional parameters parser.add_argument( '''--targets''', default='''DeprecationWarning,UserWarning,FutureWarning''', type=list_str, help='''Comma-separated list of target warning(s) which we want to extract.''', ) parser.add_argument( '''--from_gh''', action='''store_true''', help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''', ) lowerCAmelCase :Optional[Any] = parser.parse_args() lowerCAmelCase :Tuple = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowerCAmelCase :List[str] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', 
encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('''=''' * 8_0) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowerCAmelCase :int = extract_warnings(args.output_dir, args.targets) lowerCAmelCase :Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
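# A sketch of the intended entry point. The names BaseTransformer,
# add_generic_args, and generic_train come from the upstream lightning_base.py
# this file mirrors; the definitions above are anonymized, so the names are
# assumptions.
import argparse
import os

parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
BaseTransformer.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()  # e.g. --model_name_or_path bert-base-cased --do_train

model = BaseTransformer(args, num_labels=2, mode="sequence-classification")
trainer = generic_train(model, args)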
'''simple docstring'''
class MaxFenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and refresh the affected tree nodes in O(log^2 n)."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers a single position.
                self.tree[index] = value
            else:
                # Recompute the maximum over [current_left_border, index];
                # positions strictly below `index` are already up to date.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum over the half-open range [left, right)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
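# Usage of the tree above (the class name MaxFenwickTree is chosen here, since
# the original left it anonymous; updates are 0-indexed, query ranges half-open):
tree = MaxFenwickTree(5)
tree.update(1, 7)
tree.update(3, 4)
assert tree.query(0, 5) == 7  # max over indices [0, 5)
assert tree.query(2, 4) == 4  # max over indices [2, 4)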
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
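# The two full-loop tests above reduce to this denoising recipe, written here
# against the public diffusers API (scheduler.step returns an output object
# whose prev_sample field is the denoised sample). The random tensor is only a
# placeholder for the model's predicted noise residual.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule='linear')
sample = torch.randn(1, 3, 32, 32)  # placeholder for dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(scheduler.config.num_train_timesteps)):
    residual = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample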
'''simple docstring''' import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging lowerCAmelCase :int = logging.get_logger(__name__) def lowerCamelCase ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ): """simple docstring""" __magic_name__ : Optional[int] = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ), f'{len(lowerCAmelCase )} != {len(lowerCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) lowerCAmelCase :Union[str, Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 1_2: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 1_1], 4: [0, 4, 8, 1_1], 6: [0, 2, 4, 7, 9, 1_1], 9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1], 1_2: list(range(1_2)), }, 1_6: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 1_5], 3: [0, 8, 1_5], 4: [0, 5, 1_0, 1_5], 6: [0, 3, 6, 9, 1_2, 1_5], 8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5], 9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5], 1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5], 1_6: list(range(1_6)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } lowerCAmelCase :Optional[Any] = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]}, 1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]}, } def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] ): """simple docstring""" try: __magic_name__ : List[Any] = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' f' {n_student}' ) return list(range(lowerCAmelCase ) ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] ): """simple docstring""" if n_student > n_teacher: raise ValueError(f'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(lowerCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def lowerCamelCase ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[Any] , ): """simple docstring""" __magic_name__ : Optional[Any] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' 
assert (e is not None) or (d is not None), _msg if isinstance(lowerCAmelCase , lowerCAmelCase ): AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience __magic_name__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval() else: assert isinstance(lowerCAmelCase , lowerCAmelCase ), f'teacher must be a model or string got type {type(lowerCAmelCase )}' __magic_name__ : str = teacher.config.to_diff_dict() try: __magic_name__ , __magic_name__ : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: __magic_name__ : Dict = teacher_e if d is None: __magic_name__ : List[str] = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): __magic_name__ , __magic_name__ : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: __magic_name__ , __magic_name__ : Any = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: __magic_name__ : Any = teacher_e if d is None: __magic_name__ : Any = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(lowerCAmelCase ) # Copy weights __magic_name__ : Optional[Any] = teacher.config_class(**lowerCAmelCase ) __magic_name__ : Dict = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. __magic_name__ : Dict = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save __magic_name__ , __magic_name__ : List[str] = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) ) logger.info( f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' f' {save_path}' ) student.save_pretrained(lowerCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: __magic_name__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase ) if d_layers_to_copy is None: __magic_name__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase ) try: if hasattr( lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase ) logger.info( f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}' ) __magic_name__ : Tuple = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(lowerCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
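# Rough invocation sketch for the entry point above, which fire.Fire exposes as
# create_student_by_copying_alternating_layers. The teacher checkpoint and layer
# counts are illustrative; the keyword names e, d, and save_path follow the
# assertions and f-strings in the function body.
student, e_layers_to_copy, d_layers_to_copy = create_student_by_copying_alternating_layers(
    'facebook/bart-large-cnn',  # illustrative teacher checkpoint (12 enc / 12 dec layers)
    save_path='student_dir',
    e=6,  # keep 6 encoder layers
    d=3,  # keep 3 decoder layers
)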
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[int] , _A : Tuple , _A : Any , _A : str ) -> Tuple: __magic_name__ : Optional[int] = dataset __magic_name__ : Dict = process __magic_name__ : str = params def __len__( self : List[str] ) -> List[str]: return len(self.dataset ) def __getitem__( self : int , _A : Optional[Any] ) -> Tuple: __magic_name__ : Optional[int] = self.dataset[i] __magic_name__ : Dict = self.process(_A , **self.params ) return processed class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : int , _A : Optional[int] , _A : List[str] , _A : Optional[int] , _A : Tuple=None ) -> Union[str, Any]: __magic_name__ : Optional[Any] = loader __magic_name__ : str = infer __magic_name__ : Optional[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __magic_name__ : Any = None __magic_name__ : Union[str, Any] = loader_batch_size # Internal bookkeeping __magic_name__ : str = None __magic_name__ : int = None def __len__( self : Dict ) -> int: return len(self.loader ) def __iter__( self : List[str] ) -> Tuple: __magic_name__ : Optional[int] = iter(self.loader ) return self def __lowerCAmelCase ( self : str ) -> Any: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __magic_name__ : Any = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __magic_name__ : Dict = {} for k, element in self._loader_batch_data.items(): if isinstance(_A , _A ): # Convert ModelOutput to tuple first __magic_name__ : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __magic_name__ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __magic_name__ : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): __magic_name__ : int = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __magic_name__ : List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __magic_name__ : Dict = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __magic_name__ : Optional[int] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __magic_name__ : Dict = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
__magic_name__ : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __magic_name__ : str = self._loader_batch_data.__class__(_A ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self : List[Any] ) -> Dict: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __magic_name__ : Dict = next(self.iterator ) __magic_name__ : int = self.infer(_A , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_A , torch.Tensor ): __magic_name__ : Dict = processed else: __magic_name__ : int = list(processed.keys() )[0] __magic_name__ : str = processed[key] if isinstance(_A , _A ): __magic_name__ : Optional[Any] = len(_A ) else: __magic_name__ : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __magic_name__ : List[str] = observed_batch_size # Setting internal index to unwrap the batch __magic_name__ : Any = processed __magic_name__ : str = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : Any , _A : Dict , _A : Optional[Any] , _A : Union[str, Any]=None ) -> Dict: super().__init__(_A , _A , _A ) def __iter__( self : int ) -> Dict: __magic_name__ : int = iter(self.loader ) __magic_name__ : Dict = None return self def __lowerCAmelCase ( self : int ) -> Union[str, Any]: if self.subiterator is None: __magic_name__ : Any = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __magic_name__ : Optional[Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __magic_name__ : Dict = self.infer(next(self.iterator ) , **self.params ) __magic_name__ : Tuple = next(self.subiterator ) return processed class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __iter__( self : Dict ) -> Dict: __magic_name__ : Tuple = iter(self.loader ) return self def __lowerCAmelCase ( self : Optional[Any] ) -> int: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
__magic_name__ : Tuple = False __magic_name__ : Dict = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __magic_name__ : Optional[int] = self.loader_batch_item() __magic_name__ : List[Any] = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator while not is_last: __magic_name__ : Dict = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_A , torch.Tensor ): __magic_name__ : Union[str, Any] = processed else: __magic_name__ : Union[str, Any] = list(processed.keys() )[0] __magic_name__ : Union[str, Any] = processed[key] if isinstance(_A , _A ): __magic_name__ : Union[str, Any] = len(_A ) else: __magic_name__ : Tuple = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __magic_name__ : str = observed_batch_size __magic_name__ : int = processed __magic_name__ : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: __magic_name__ : Dict = self.loader_batch_item() __magic_name__ : Optional[int] = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator else: __magic_name__ : List[Any] = processed __magic_name__ : Tuple = item.pop('is_last' ) accumulator.append(_A ) return accumulator class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[int] , _A : Dataset , _A : str ) -> Union[str, Any]: __magic_name__ : Optional[int] = dataset __magic_name__ : Optional[Any] = key def __len__( self : Any ) -> int: return len(self.dataset ) def __getitem__( self : List[str] , _A : List[str] ) -> Optional[int]: return self.dataset[i][self.key] class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[int] , _A : Dataset , _A : str , _A : str ) -> Any: __magic_name__ : Optional[Any] = dataset __magic_name__ : str = keya __magic_name__ : Optional[int] = keya def __len__( self : Optional[int] ) -> int: return len(self.dataset ) def __getitem__( self : str , _A : Union[str, Any] ) -> Dict: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
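# In practice these wrappers back the lazy batching of transformers pipelines;
# KeyDataset (the second-to-last class above) is part of the public API. A
# typical pattern, assuming the datasets library is installed:
from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

dataset = load_dataset('imdb', split='test')
classifier = pipeline('sentiment-analysis')
# KeyDataset yields dataset[i]['text'] so the pipeline can stream and batch.
for output in classifier(KeyDataset(dataset, 'text'), batch_size=8):
    print(output)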
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
'''simple docstring''' class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Union[str, Any] ) -> Tuple: # we need a list not a string, so do something to change the type __magic_name__ : Tuple = arr.split(',' ) def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Any = [int(self.array[0] )] * len(self.array ) __magic_name__ : int = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): __magic_name__ : Dict = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) __magic_name__ : List[Any] = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": lowerCAmelCase :Union[str, Any] = input('''please input some numbers:''') lowerCAmelCase :Union[str, Any] = SubArray(whole_array) lowerCAmelCase :List[Any] = array.solve_sub_array() print(('''the results is:''', re))
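# Usage sketch for the class above, instantiated as SubArray exactly as in its
# __main__ block: the constructor splits a comma-separated string and
# solve_sub_array runs Kadane's algorithm over the values.
sub_array = SubArray('1,-2,3,4,-5')
print(sub_array.solve_sub_array())  # 7, the maximum contiguous sum, from [3, 4]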
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: __magic_name__ : Tuple = row __magic_name__ : str = col __magic_name__ : Optional[Any] = graph def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: # Checking all 8 elements surrounding nth element __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : Optional[int] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A ) def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : Any = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_A , _A , _A ) count += 1 return count
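# Usage sketch for the island counter above on the classic 5x5 grid. The
# readable names Graph and count_islands are assumptions for the obfuscated
# class and its last method, which runs a DFS with 8-directional connectivity.
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(5, 5, grid)  # hypothetical alias for the class above
print(g.count_islands())  # 5: diagonal neighbours merge, leaving five components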
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase :Optional[int] = logging.get_logger(__name__) lowerCAmelCase :Any = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = """bert""" def __init__( self : List[Any] , _A : int=30522 , _A : List[str]=768 , _A : Optional[Any]=12 , _A : Union[str, Any]=12 , _A : Optional[Any]=3072 , _A : str="gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Union[str, Any]=512 , _A : List[Any]=2 , _A : Dict=0.02 , _A : Optional[int]=1E-12 , 
_A : Dict=0 , _A : Any="absolute" , _A : Optional[int]=True , _A : List[str]=None , **_A : Union[str, Any] , ) -> List[str]: super().__init__(pad_token_id=_A , **_A ) __magic_name__ : List[Any] = vocab_size __magic_name__ : Union[str, Any] = hidden_size __magic_name__ : List[str] = num_hidden_layers __magic_name__ : Tuple = num_attention_heads __magic_name__ : List[Any] = hidden_act __magic_name__ : Dict = intermediate_size __magic_name__ : List[str] = hidden_dropout_prob __magic_name__ : Dict = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Tuple = type_vocab_size __magic_name__ : Tuple = initializer_range __magic_name__ : str = layer_norm_eps __magic_name__ : Tuple = position_embedding_type __magic_name__ : List[str] = use_cache __magic_name__ : Optional[Any] = classifier_dropout class _lowerCamelCase ( lowercase__ ): '''simple docstring''' @property def __lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __magic_name__ : str = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __magic_name__ : Any = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
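# Instantiation sketch for the config class above, following the standard
# transformers pattern it implements: keyword overrides replace the defaults,
# and everything else keeps the values shown in __init__.
from transformers import BertConfig

config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.hidden_size)  # 256 (overridden)
print(config.position_embedding_type)  # 'absolute' (the default above)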
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import Any class _lowerCamelCase : '''simple docstring''' def __init__( self : int , _A : Any ) -> Any: __magic_name__ : List[str] = data __magic_name__ : Tuple = None def __repr__( self : Tuple ) -> str: return F'Node({self.data})' class _lowerCamelCase : '''simple docstring''' def __init__( self : str ) -> List[Any]: __magic_name__ : Tuple = None def __iter__( self : Tuple ) -> Any: __magic_name__ : Dict = self.head while node: yield node.data __magic_name__ : Optional[int] = node.next def __len__( self : int ) -> int: return sum(1 for _ in self ) def __repr__( self : Optional[Any] ) -> str: return "->".join([str(_A ) for item in self] ) def __getitem__( self : Optional[Any] , _A : int ) -> Any: if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self : List[str] , _A : int , _A : Any ) -> None: if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __magic_name__ : Optional[int] = self.head for _ in range(_A ): __magic_name__ : Tuple = current.next __magic_name__ : Union[str, Any] = data def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> None: self.insert_nth(len(self ) , _A ) def __lowerCAmelCase ( self : Union[str, Any] , _A : Any ) -> None: self.insert_nth(0 , _A ) def __lowerCAmelCase ( self : Optional[int] , _A : int , _A : Any ) -> None: if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __magic_name__ : Optional[int] = Node(_A ) if self.head is None: __magic_name__ : List[str] = new_node elif index == 0: __magic_name__ : Optional[int] = self.head # link new_node to head __magic_name__ : Union[str, Any] = new_node else: __magic_name__ : Union[str, Any] = self.head for _ in range(index - 1 ): __magic_name__ : Dict = temp.next __magic_name__ : List[str] = temp.next __magic_name__ : Optional[int] = new_node def __lowerCAmelCase ( self : Tuple ) -> None: # print every node data print(self ) def __lowerCAmelCase ( self : Tuple ) -> Any: return self.delete_nth(0 ) def __lowerCAmelCase ( self : Dict ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __lowerCAmelCase ( self : Optional[Any] , _A : int = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __magic_name__ : int = self.head # default first node if index == 0: __magic_name__ : int = self.head.next else: __magic_name__ : Dict = self.head for _ in range(index - 1 ): __magic_name__ : Any = temp.next __magic_name__ : Optional[int] = temp.next __magic_name__ : str = temp.next.next return delete_node.data def __lowerCAmelCase ( self : Optional[Any] ) -> bool: return self.head is None def __lowerCAmelCase ( self : Optional[int] ) -> None: __magic_name__ : str = None __magic_name__ : List[Any] = self.head while current: # Store the current node's next node. 
__magic_name__ : Dict = current.next # Make the current node's next point backwards __magic_name__ : Any = prev # Make the previous node be the current node __magic_name__ : str = current # Make the current node the next node (to progress iteration) __magic_name__ : Any = next_node # Return prev in order to put the head at the end __magic_name__ : Dict = prev def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[Any] = LinkedList() assert linked_list.is_empty() is True assert str(lowerCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(lowerCAmelCase ) == i linked_list.insert_nth(lowerCAmelCase , i + 1 ) assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(lowerCAmelCase ) == 9 assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __magic_name__ : List[str] = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(-8 , 1 ) ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [ -9, 100, Node(7734_5112 ), 'dlrow olleH', 7, 5555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] __magic_name__ : Union[str, Any] = LinkedList() for i in test_input: linked_list.insert_tail(lowerCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(lowerCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __magic_name__ : Any = linked_list.delete_head() assert result == -9 assert ( str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __magic_name__ : Optional[Any] = linked_list.delete_tail() assert result == 12.2 assert ( str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __magic_name__ : Dict = linked_list.delete_nth(10 ) assert result is None assert ( str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(lowerCAmelCase ) assert ( str(lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(lowerCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowerCamelCase ( ): """simple docstring""" from doctest import testmod testmod() __magic_name__ : List[Any] = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(lowerCAmelCase ) print('\nReading/changing Node data using indexing:' ) print(f'Element at Position 1: {linked_list[1]}' ) __magic_name__ : List[Any] = input('Enter New Value: ' ).strip() print('New list:' ) print(lowerCAmelCase ) print(f'length of linked_list is : {len(lowerCAmelCase )}' ) if __name__ == "__main__": main()
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ): """simple docstring""" __magic_name__ : list[int] = [0] __magic_name__ : int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __magic_name__ : int = 0 # the area corresponding to the grid that gives the product closest to target __magic_name__ : int = 0 # an estimate of b, using the quadratic formula __magic_name__ : float # the largest integer less than b_estimate __magic_name__ : int # the smallest integer greater than b_estimate __magic_name__ : int # the triangle number corresponding to b_floor __magic_name__ : int # the triangle number corresponding to b_ceil __magic_name__ : int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __magic_name__ : List[Any] = floor(lowerCAmelCase ) __magic_name__ : Dict = ceil(lowerCAmelCase ) __magic_name__ : Any = triangle_numbers[b_floor] __magic_name__ : Optional[int] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __magic_name__ : Any = triangle_b_first_guess * triangle_a __magic_name__ : Any = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __magic_name__ : List[str] = triangle_b_second_guess * triangle_a __magic_name__ : Optional[int] = idx_a * b_ceil return area if __name__ == "__main__": print(F'{solution() = }')
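# Sanity check: this is Project Euler 85 (counting rectangles). An m x n grid
# contains T(m) * T(n) rectangles for triangle numbers T, and solution() returns
# the area of the grid whose rectangle count lies closest to the two-million
# target.
print(solution())  # 2772: the 36 x 77 grid, with T(36) * T(77) = 666 * 3003 = 1999998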
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase : '''simple docstring''' def __init__( self : int , _A : Tuple , _A : List[str]=2 , _A : Optional[Any]=8 , _A : Optional[int]=True , _A : List[Any]=True , _A : Dict=True , _A : Union[str, Any]=True , _A : Tuple=99 , _A : List[Any]=16 , _A : Any=5 , _A : str=2 , _A : Optional[Any]=36 , _A : Optional[Any]="gelu" , _A : Any=0.0 , _A : Union[str, Any]=0.0 , _A : List[Any]=512 , _A : Optional[int]=16 , _A : Tuple=2 , _A : Optional[Any]=0.02 , _A : int=3 , _A : List[str]=4 , _A : Optional[int]=None , ) -> str: __magic_name__ : Dict = parent __magic_name__ : int = batch_size __magic_name__ : str = seq_length __magic_name__ : Any = is_training __magic_name__ : Tuple = use_input_mask __magic_name__ : Any = use_token_type_ids __magic_name__ : List[str] = use_labels __magic_name__ : Dict = vocab_size __magic_name__ : List[str] = hidden_size __magic_name__ : int = num_hidden_layers __magic_name__ : Optional[Any] = num_attention_heads __magic_name__ : Optional[Any] = intermediate_size __magic_name__ : Dict = hidden_act __magic_name__ : Any = hidden_dropout_prob __magic_name__ : str = attention_probs_dropout_prob __magic_name__ : Optional[Any] = max_position_embeddings __magic_name__ : Dict = type_vocab_size __magic_name__ : int = type_sequence_label_size __magic_name__ : Optional[Any] = initializer_range __magic_name__ : Union[str, Any] = num_labels __magic_name__ : Optional[int] = num_choices __magic_name__ : Optional[Any] = scope def __lowerCAmelCase ( self : str ) -> Optional[int]: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[str] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : Optional[int] = None if self.use_token_type_ids: __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Optional[Any] = None __magic_name__ : List[str] = None __magic_name__ : Tuple = None if self.use_labels: __magic_name__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Dict ) -> List[str]: return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
is_decoder=_A , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : str = self.get_config() __magic_name__ : List[Any] = 300 return config def __lowerCAmelCase ( self : int ) -> Tuple: ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = self.prepare_config_and_inputs() __magic_name__ : Optional[Any] = True __magic_name__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Tuple , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Union[str, Any] ) -> Dict: __magic_name__ : List[Any] = MraModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model(_A , attention_mask=_A , token_type_ids=_A ) __magic_name__ : int = model(_A , token_type_ids=_A ) __magic_name__ : str = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : List[str] , _A : List[Any] , _A : Optional[Any] , _A : str , _A : int , _A : List[Any] , _A : int , _A : List[Any] , ) -> str: __magic_name__ : Tuple = True __magic_name__ : Optional[Any] = MraModel(_A ) model.to(_A ) model.eval() __magic_name__ : int = model( _A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , ) __magic_name__ : Dict = model( _A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , ) __magic_name__ : List[Any] = model(_A , attention_mask=_A , token_type_ids=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : Any , _A : Optional[Any] , _A : Tuple , _A : Union[str, Any] , _A : List[Any] , _A : List[Any] ) -> str: __magic_name__ : Dict = MraForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : Any , _A : List[str] , _A : Optional[Any] , _A : Tuple , _A : Tuple , _A : Tuple ) -> Optional[Any]: __magic_name__ : int = MraForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Union[str, Any] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : List[str] , _A : Tuple , _A : List[Any] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , _A : Any , _A : Dict ) -> Dict: __magic_name__ : List[str] = self.num_labels __magic_name__ : List[str] = MraForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def 
__lowerCAmelCase ( self : List[Any] , _A : Dict , _A : Tuple , _A : Optional[Any] , _A : Any , _A : Optional[int] , _A : Optional[int] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Optional[Any] = MraForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : Optional[int] , _A : str , _A : int , _A : Tuple , _A : str , _A : Dict , _A : Any ) -> Optional[int]: __magic_name__ : Any = self.num_choices __magic_name__ : Optional[int] = MraForMultipleChoice(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ : Optional[Any] = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: __magic_name__ : Any = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : Optional[Any] = config_and_inputs __magic_name__ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) A_ : Dict = False A_ : Dict = False A_ : int = False A_ : Union[str, Any] = False A_ : List[Any] = () def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: __magic_name__ : Any = MraModelTester(self ) __magic_name__ : Any = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Any: __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Any ) -> Optional[Any]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ : Optional[Any] = type self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> List[Any]: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : List[Any] ) -> List[str]: for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : str = MraModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip(reason='MRA does not output attentions' ) def __lowerCAmelCase ( self : List[str] ) -> Dict: return @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : Dict ) -> List[Any]: __magic_name__ : int = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) __magic_name__ : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ : Optional[Any] = model(_A )[0] __magic_name__ : str = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _A ) __magic_name__ : Optional[int] = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[str] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) __magic_name__ : Tuple = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ : Tuple = model(_A )[0] __magic_name__ : Tuple = 50265 __magic_name__ : str = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _A ) __magic_name__ : str = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: __magic_name__ : int = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) __magic_name__ : Optional[Any] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ : Optional[Any] = model(_A )[0] __magic_name__ : Optional[Any] = 50265 __magic_name__ : Optional[Any] = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _A ) __magic_name__ : Union[str, Any] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1E-4 ) )
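# A minimal, self-contained sketch (assumption: not the actual `ids_tensor` /
# `random_attention_mask` test helpers) of how the random inputs consumed by the MRA
# model tester above can be built with plain torch.
import torch

def ids_tensor_sketch(shape, vocab_size):
    # Uniformly sample token ids in [0, vocab_size).
    return torch.randint(low=0, high=vocab_size, size=shape, dtype=torch.long)

def random_attention_mask_sketch(shape):
    # Random 0/1 mask; force the last position of each row to 1 so no row is fully masked.
    mask = torch.randint(low=0, high=2, size=shape, dtype=torch.long)
    mask[:, -1] = 1
    return mask

input_ids = ids_tensor_sketch((2, 8), vocab_size=99)  # (batch_size, seq_length)
attention_mask = random_attention_mask_sketch((2, 8))
print(input_ids.shape, attention_mask.shape)  # torch.Size([2, 8]) torch.Size([2, 8])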
331
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
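# Hedged sketch of the lazy-import pattern the XGLM __init__ builds on: module
# attributes are resolved on first access (PEP 562 module __getattr__) instead of
# eagerly at import time. Illustrative only, not the actual _LazyModule class, and it
# assumes it lives in a package __init__ so the relative import can resolve.
import importlib

_import_structure_sketch = {"configuration_xglm": ["XGLMConfig"]}

def __getattr__(name):
    for module_name, symbols in _import_structure_sketch.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")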
331
1
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCamelCase ( lowerCAmelCase : Dict ): """simple docstring""" __magic_name__ : List[str] = np.max(_outputs , axis=-1 , keepdims=lowerCAmelCase ) __magic_name__ : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : List[Any] = """sigmoid""" A_ : Optional[Any] = """softmax""" A_ : str = """none""" @add_end_docstrings( lowercase__ , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = False A_ : int = ClassificationFunction.NONE def __init__( self : int , **_A : List[str] ) -> Union[str, Any]: super().__init__(**_A ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self : Any , _A : Optional[int]=None , _A : Union[str, Any]=None , _A : str="" , **_A : Optional[int] ) -> Dict: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" __magic_name__ : int = tokenizer_kwargs __magic_name__ : Dict = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: __magic_name__ : Tuple = self.model.config.return_all_scores if isinstance(_A , _A ) or top_k is None: __magic_name__ : str = top_k __magic_name__ : Optional[int] = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , _A , ) if return_all_scores: __magic_name__ : Dict = None else: __magic_name__ : Dict = 1 if isinstance(_A , _A ): __magic_name__ : Tuple = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: __magic_name__ : str = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : Optional[int] , *_A : List[Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Tuple = super().__call__(*_A , **_A ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
__magic_name__ : Dict = 'top_k' not in kwargs if isinstance(args[0] , _A ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self : Optional[int] , _A : str , **_A : List[str] ) -> Dict[str, GenericTensor]: __magic_name__ : Optional[int] = self.framework if isinstance(_A , _A ): return self.tokenizer(**_A , return_tensors=_A , **_A ) elif isinstance(_A , _A ) and len(_A ) == 1 and isinstance(inputs[0] , _A ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_A , **_A ) elif isinstance(_A , _A ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(_A , return_tensors=_A , **_A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Union[str, Any]: return self.model(**_A ) def __lowerCAmelCase ( self : Tuple , _A : List[str] , _A : Union[str, Any]=None , _A : Dict=1 , _A : Union[str, Any]=True ) -> Optional[int]: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: __magic_name__ : Union[str, Any] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: __magic_name__ : List[str] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: __magic_name__ : List[str] = self.model.config.function_to_apply else: __magic_name__ : int = ClassificationFunction.NONE __magic_name__ : Union[str, Any] = model_outputs['logits'][0] __magic_name__ : Dict = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: __magic_name__ : Tuple = sigmoid(_A ) elif function_to_apply == ClassificationFunction.SOFTMAX: __magic_name__ : Dict = softmax(_A ) elif function_to_apply == ClassificationFunction.NONE: __magic_name__ : Dict = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} __magic_name__ : str = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(_A ) ] if not _legacy: dict_scores.sort(key=lambda _A : x["score"] , reverse=_A ) if top_k is not None: __magic_name__ : List[Any] = dict_scores[:top_k] return dict_scores
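# Self-contained numeric check of the postprocessing path above: softmax the logits,
# build {label, score} dicts, and rank them for top_k. The id-to-label mapping and the
# logit values are illustrative, not from the source.
import numpy as np

def softmax_sketch(logits):
    maxes = np.max(logits, axis=-1, keepdims=True)
    shifted_exp = np.exp(logits - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

logits = np.array([[2.0, 0.5, -1.0]])
scores = softmax_sketch(logits)[0]
id2label = {0: "positive", 1: "neutral", 2: "negative"}  # assumed labels
dict_scores = [{"label": id2label[i], "score": float(s)} for i, s in enumerate(scores)]
dict_scores.sort(key=lambda x: x["score"], reverse=True)
print(dict_scores[:2])  # top_k=2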
331
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
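# Worked example of the crop_pct resize rule above: for shortest_edge < 384 the image
# is first resized so its short side equals int(shortest_edge / crop_pct), then a
# (shortest_edge, shortest_edge) centre crop is taken.
shortest_edge = 224
crop_pct = 224 / 256  # the backwards-compatibility default used above
resize_shortest_edge = int(shortest_edge / crop_pct)
print(resize_shortest_edge)  # 256 -> resize short side to 256, then centre-crop 224x224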
331
1
'''simple docstring'''
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
    waitKey()
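# Hedged sanity check of the kernel formula above, self-contained so it runs on its
# own: a 3x3 kernel with sigma=1 is symmetric and peaks at 1 / (2 * pi) in the centre.
from numpy import exp, mgrid, pi, square

center = 3 // 2
x, y = mgrid[0 - center : 3 - center, 0 - center : 3 - center]
kernel = 1 / (2 * pi * 1) * exp(-(square(x) + square(y)) / (2 * square(1)))
print(kernel.round(4))  # centre entry ~0.1592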
331
'''simple docstring'''
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
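# Worked example mirroring the force == 0 branch above: F = hbar * c * pi**2 * A / (240 * d**4).
# The plate area and separation below are illustrative values, not from the source.
from math import pi

hbar = 1.054571817e-34  # J * s
c = 3e8                 # m / s
area = 4e-4             # m^2
distance = 1e-6         # m
force = (hbar * c * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")  # ~5.204e-07 N, attractive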
331
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :List[Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[Any] = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
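# Minimal sketch of the optional-dependency guard used above: each backend is probed
# once and symbols are only registered when the probe succeeds. `find_spec` stands in
# for the library's is_*_available() helpers (an assumption, for illustration only).
import importlib.util

def is_available_sketch(package_name):
    return importlib.util.find_spec(package_name) is not None

_import_structure_sketch = {"configuration_reformer": ["ReformerConfig"]}
if is_available_sketch("sentencepiece"):
    _import_structure_sketch["tokenization_reformer"] = ["ReformerTokenizer"]
if is_available_sketch("torch"):
    _import_structure_sketch["modeling_reformer"] = ["ReformerModel"]
print(_import_structure_sketch)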
331
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
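# The expected-outcome expression in generate_random_hand() above indexes
# ['Loss', 'Tie', 'Win'] with a sum of two booleans: (play >= oppo) + (play > oppo)
# is 0 for a loss, 1 for a tie and 2 for a win.
outcomes = ["Loss", "Tie", "Win"]
for play, oppo in [(3, 7), (5, 5), (9, 2)]:
    print(play, oppo, outcomes[(play >= oppo) + (play > oppo)])
# 3 7 Loss / 5 5 Tie / 9 2 Win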
331
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase :str = logging.get_logger(__name__) lowerCAmelCase :Optional[int] = { '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''', # See all Marian models at https://huggingface.co/models?filter=marian } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = """marian""" A_ : int = ["""past_key_values"""] A_ : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Dict , _A : int=58101 , _A : Optional[Any]=None , _A : int=1024 , _A : Any=12 , _A : Optional[Any]=4096 , _A : Optional[Any]=16 , _A : Optional[Any]=12 , _A : Union[str, Any]=4096 , _A : Any=16 , _A : Tuple=0.0 , _A : Optional[Any]=0.0 , _A : Tuple=True , _A : str=True , _A : int="gelu" , _A : Tuple=1024 , _A : str=0.1 , _A : Tuple=0.0 , _A : Optional[int]=0.0 , _A : Any=0.02 , _A : Optional[Any]=58100 , _A : List[Any]=False , _A : Union[str, Any]=58100 , _A : int=0 , _A : Union[str, Any]=0 , _A : Union[str, Any]=True , **_A : str , ) -> Optional[Any]: __magic_name__ : Optional[Any] = vocab_size __magic_name__ : List[Any] = decoder_vocab_size or vocab_size __magic_name__ : Optional[Any] = max_position_embeddings __magic_name__ : str = d_model __magic_name__ : Dict = encoder_ffn_dim __magic_name__ : Optional[Any] = encoder_layers __magic_name__ : List[Any] = encoder_attention_heads __magic_name__ : Tuple = decoder_ffn_dim __magic_name__ : List[str] = decoder_layers __magic_name__ : Optional[int] = decoder_attention_heads __magic_name__ : Optional[int] = dropout __magic_name__ : Optional[Any] = attention_dropout __magic_name__ : Optional[Any] = activation_dropout __magic_name__ : str = activation_function __magic_name__ : int = init_std __magic_name__ : Optional[int] = encoder_layerdrop __magic_name__ : Tuple = decoder_layerdrop __magic_name__ : Optional[int] = use_cache __magic_name__ : Dict = encoder_layers __magic_name__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True __magic_name__ : Optional[int] = share_encoder_decoder_embeddings super().__init__( pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def __lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __magic_name__ : int = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __magic_name__ : str = {0: 'batch'} __magic_name__ : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __magic_name__ : List[Any] = {0: 'batch', 1: 'decoder_sequence'} __magic_name__ : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_A , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__magic_name__ : Optional[Any] = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __magic_name__ , __magic_name__ : Any = self.num_layers for i in range(_A ): __magic_name__ : str = {0: 'batch', 2: 'past_sequence + sequence'} __magic_name__ : Any = {0: 'batch', 2: 'past_sequence + sequence'} else: __magic_name__ : str = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def __lowerCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __magic_name__ : Tuple = super().outputs else: __magic_name__ : int = super(_A , self ).outputs if self.use_past: __magic_name__ , __magic_name__ : int = self.num_layers for i in range(_A ): __magic_name__ : Any = {0: 'batch', 2: 'past_sequence + sequence'} __magic_name__ : int = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def __lowerCAmelCase ( self : Optional[int] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]: __magic_name__ : str = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) # Generate decoder inputs __magic_name__ : int = seq_length if not self.use_past else 1 __magic_name__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) __magic_name__ : Optional[int] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __magic_name__ : Tuple = dict(**_A , **_A ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __magic_name__ , __magic_name__ : Union[str, Any] = common_inputs['input_ids'].shape __magic_name__ : Optional[int] = common_inputs['decoder_input_ids'].shape[1] __magic_name__ , __magic_name__ : int = self.num_attention_heads __magic_name__ : List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ : Any = decoder_seq_length + 3 __magic_name__ : List[Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __magic_name__ : int = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(_A , _A )] , dim=1 ) __magic_name__ : Tuple = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __magic_name__ , __magic_name__ : int = self.num_layers __magic_name__ : Any = min(_A , _A ) __magic_name__ : Optional[int] = max(_A , _A ) - min_num_layers __magic_name__ : Optional[int] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(_A ): common_inputs["past_key_values"].append( ( torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), ) ) # TODO: test this. 
__magic_name__ : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(_A , _A ): common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) ) return common_inputs def __lowerCAmelCase ( self : Optional[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]: __magic_name__ : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder( _A , _A , _A , _A , _A ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __magic_name__ , __magic_name__ : Optional[int] = common_inputs['input_ids'].shape # Not using the same length for past_key_values __magic_name__ : List[str] = seqlen + 2 __magic_name__ , __magic_name__ : Optional[Any] = self.num_layers __magic_name__ , __magic_name__ : Optional[int] = self.num_attention_heads __magic_name__ : int = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ : Dict = common_inputs['attention_mask'].dtype __magic_name__ : Union[str, Any] = torch.cat( [common_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 ) __magic_name__ : Any = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A ) ] return common_inputs def __lowerCAmelCase ( self : List[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __magic_name__ : Tuple = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __magic_name__ : int = tokenizer.num_special_tokens_to_add(_A ) __magic_name__ : List[str] = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A ) # Generate dummy inputs according to compute batch and sequence __magic_name__ : str = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __magic_name__ : Union[str, Any] = dict(tokenizer(_A , return_tensors=_A ) ) return common_inputs def __lowerCAmelCase ( self : Optional[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: __magic_name__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) else: __magic_name__ : str = self._generate_dummy_inputs_for_causal_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) return common_inputs def __lowerCAmelCase ( self : str , _A : Tuple , _A : List[str] , _A : Union[str, Any] , _A : List[str] ) -> Any: if self.task in ["default", "seq2seq-lm"]: __magic_name__ : Union[str, Any] = super()._flatten_past_key_values_(_A , _A , _A , _A ) else: __magic_name__ : List[Any] = super(_A , self )._flatten_past_key_values_( _A , _A , _A , _A ) @property def __lowerCAmelCase ( self : Union[str, Any] ) -> float: return 1E-4
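# Shape check for the dummy past_key_values built above: each cached key/value tensor
# is (batch, num_heads, past_length, d_model // num_heads). The numbers below are
# illustrative, matching the Marian defaults d_model=1024 and 16 decoder heads.
batch = 2
num_decoder_attention_heads = 16
d_model = 1024
decoder_seq_length = 8
decoder_past_length = decoder_seq_length + 3  # as in the generator above
decoder_shape = (
    batch,
    num_decoder_attention_heads,
    decoder_past_length,
    d_model // num_decoder_attention_heads,
)
print(decoder_shape)  # (2, 16, 11, 64)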
331
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
1
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
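# Hedged sketch of what KwargsHandler.to_kwargs() does for the MockClass assertions
# above: diff the instance against the dataclass defaults and keep only changed fields.
# Illustrative only, not the actual accelerate implementation.
from dataclasses import dataclass, fields

@dataclass
class MockClassSketch:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        default = MockClassSketch()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

print(MockClassSketch(a=2, b=True).to_kwargs())  # {'a': 2, 'b': True}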
331
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
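# The hypothesis template above is plain str.format: each candidate label expands into
# a full sentence that is tokenized alongside the image. The labels are illustrative.
candidate_labels = ["cat", "dog", "bird"]
hypothesis_template = "This is a photo of {}."
sequences = [hypothesis_template.format(x) for x in candidate_labels]
print(sequences)  # ['This is a photo of cat.', 'This is a photo of dog.', ...]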
331
1
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCAmelCase :int = logging.get_logger(__name__) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ): """simple docstring""" return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def lowerCamelCase ( lowerCAmelCase : np.ndarray , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] = None ): """simple docstring""" __magic_name__ : str = tesseract_config if tesseract_config is not None else '' # apply OCR __magic_name__ : List[str] = to_pil_image(lowerCAmelCase ) __magic_name__ , __magic_name__ : Optional[int] = pil_image.size __magic_name__ : str = pytesseract.image_to_data(lowerCAmelCase , lang=lowerCAmelCase , output_type='dict' , config=lowerCAmelCase ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates __magic_name__ : Dict = [idx for idx, word in enumerate(lowerCAmelCase ) if not word.strip()] __magic_name__ : str = [word for idx, word in enumerate(lowerCAmelCase ) if idx not in irrelevant_indices] __magic_name__ : Dict = [coord for idx, coord in enumerate(lowerCAmelCase ) if idx not in irrelevant_indices] __magic_name__ : int = [coord for idx, coord in enumerate(lowerCAmelCase ) if idx not in irrelevant_indices] __magic_name__ : Union[str, Any] = [coord for idx, coord in enumerate(lowerCAmelCase ) if idx not in irrelevant_indices] __magic_name__ : Any = [coord for idx, coord in enumerate(lowerCAmelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __magic_name__ : List[str] = [] for x, y, w, h in zip(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __magic_name__ : Tuple = [x, y, x + w, y + h] actual_boxes.append(lowerCAmelCase ) # finally, normalize the bounding boxes __magic_name__ : int = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : List[Any] = ["""pixel_values"""] def __init__( self : Union[str, Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = "" , **_A : Union[str, Any] , ) -> None: super().__init__(**_A ) __magic_name__ : Any = size if size is not None else {'height': 224, 'width': 224} __magic_name__ : Any = get_size_dict(_A ) __magic_name__ : Tuple = do_resize __magic_name__ : Tuple = size __magic_name__ : Any = resample __magic_name__ : Optional[Any] = apply_ocr __magic_name__ : Dict 
= ocr_lang __magic_name__ : List[str] = tesseract_config def __lowerCAmelCase ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: __magic_name__ : Dict = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) __magic_name__ : Dict = (size['height'], size['width']) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Dict , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Tuple , ) -> PIL.Image.Image: __magic_name__ : List[Any] = do_resize if do_resize is not None else self.do_resize __magic_name__ : List[str] = size if size is not None else self.size __magic_name__ : Tuple = get_size_dict(_A ) __magic_name__ : str = resample if resample is not None else self.resample __magic_name__ : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr __magic_name__ : str = ocr_lang if ocr_lang is not None else self.ocr_lang __magic_name__ : str = tesseract_config if tesseract_config is not None else self.tesseract_config __magic_name__ : Tuple = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) # All transformations expect numpy arrays. __magic_name__ : str = [to_numpy_array(_A ) for image in images] if apply_ocr: requires_backends(self , 'pytesseract' ) __magic_name__ : Tuple = [] __magic_name__ : List[Any] = [] for image in images: __magic_name__ , __magic_name__ : Any = apply_tesseract(_A , _A , _A ) words_batch.append(_A ) boxes_batch.append(_A ) if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , resample=_A ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __magic_name__ : Dict = [flip_channel_order(_A ) for image in images] __magic_name__ : List[str] = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = BatchFeature(data={'pixel_values': images} , tensor_type=_A ) if apply_ocr: __magic_name__ : Optional[int] = words_batch __magic_name__ : List[str] = boxes_batch return data
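# Hedged sketch of the 0-1000 box normalization performed by normalize_box
# above: LayoutLM-family models expect word boxes rescaled into a fixed
# 1000x1000 coordinate space regardless of the page's pixel size. The numbers
# below are made up for illustration; the helper name is hypothetical.

def _normalize_box_sketch(box, width, height):
    left, top, right, bottom = box
    return [
        int(1000 * left / width),
        int(1000 * top / height),
        int(1000 * right / width),
        int(1000 * bottom / height),
    ]

# a 50x20 pixel word box at (100, 200) on an 800x600 page
assert _normalize_box_sketch([100, 200, 150, 220], 800, 600) == [125, 333, 187, 366]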
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
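# Hedged sketch of why the hard-coded id lists asserted above look the way
# they do: ByT5 has no learned vocabulary, so token ids are just the UTF-8
# bytes of the text offset by 3 (ids 0/1/2 are reserved for pad/</s>/unk),
# with </s> (id 1) appended. The helper name is hypothetical.

def _byt5_ids_sketch(text: str) -> list:
    return [byte + 3 for byte in text.encode("utf-8")] + [1]

# reproduces the expected ids used for 'Unicode €.' in the test above
assert _byt5_ids_sketch("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]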
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
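# Hedged sketch of where the CORRECT_DICT fixtures above come from: they are
# essentially the markdown heading tree of the README body, where each heading
# opens a subsection under the nearest shallower heading. This toy parser is
# illustrative only (the real ReadMe parser also handles the YAML front
# matter, section text, and empty-text tracking); the helper name is made up.

import re


def _heading_tree_sketch(md: str) -> dict:
    root = {"name": "root", "subsections": []}
    stack = [(0, root)]
    for line in md.splitlines():
        match = re.match(r"^(#+)\s+(.*)", line)
        if not match:
            continue
        level, name = len(match.group(1)), match.group(2)
        # close any open section at the same or deeper heading level
        while stack[-1][0] >= level:
            stack.pop()
        node = {"name": name, "subsections": []}
        stack[-1][1]["subsections"].append(node)
        stack.append((level, node))
    return root


assert _heading_tree_sketch(README_CORRECT)["subsections"][0]["name"] == "Dataset Card for My Dataset"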
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
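# Hedged, standalone sketch (not the transformers implementation) of the trie
# bookkeeping these tests exercise: a disjunctive constraint is fulfilled once
# the generated suffix completes ANY one of the candidate token-id sequences,
# and an off-trie token resets progress. Class and method names are made up;
# equating "leaf" with "completed" is safe here only because, as noted above,
# no candidate sequence may be a prefix of another.

class _TinyDisjunctiveTrie:
    def __init__(self, nested_token_ids):
        self.trie = {}
        for seq in nested_token_ids:
            node = self.trie
            for token_id in seq:
                node = node.setdefault(token_id, {})
        self.reset()

    def reset(self):
        self.current = self.trie

    def update(self, token_id):
        """Mirror DisjunctiveConstraint.update's (stepped, completed, reset)."""
        if token_id in self.current:
            self.current = self.current[token_id]
            return True, len(self.current) == 0, False  # reached a leaf -> completed
        self.reset()
        return False, False, True


trie = _TinyDisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
assert trie.update(1) == (True, False, False)
assert trie.update(2) == (True, False, False)
assert trie.update(4) == (True, True, False)  # branch [1, 2, 4] fulfilled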
'''
Minimal path sum from the left column to the right column of a matrix,
moving only right, up and down (Project Euler problem 82).
'''

import os


def solution(filename: str = "input.txt") -> int:
    """Read a comma-separated matrix from `filename` (resolved relative to
    this file) and return its minimal left-to-right path sum."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # any cell in the left column can be the entry point
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    # sweep column by column: step in from the left, then relax downward and
    # upward so vertical moves inside the column are accounted for
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
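# Hedged usage sketch for solution() above, with hypothetical 3x3 data written
# to a temporary file since solution() reads its matrix from disk. Passing an
# absolute path works because os.path.join discards earlier components when a
# later one is absolute.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("4,9,9\n4,1,3\n9,9,9\n")
    # cheapest left-to-right path: enter at 4, step right to 1, then right to 3
    assert solution(tmp.name) == 8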
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class _lowerCamelCase : '''simple docstring''' def __init__( self : int , _A : Tuple , _A : List[Any]=13 , _A : Optional[int]=7 , _A : Union[str, Any]=True , _A : str=True , _A : str=False , _A : Dict=True , _A : Union[str, Any]=99 , _A : Dict=32 , _A : Optional[int]=5 , _A : Optional[int]=4 , _A : str=37 , _A : Any="gelu" , _A : Tuple=0.1 , _A : Tuple=0.1 , _A : Tuple=512 , _A : Optional[Any]=16 , _A : List[str]=2 , _A : Any=0.02 , _A : str=3 , _A : Any=4 , _A : Union[str, Any]=None , ) -> str: __magic_name__ : str = parent __magic_name__ : Dict = batch_size __magic_name__ : Dict = seq_length __magic_name__ : Optional[int] = is_training __magic_name__ : Any = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : Union[str, Any] = use_labels __magic_name__ : List[str] = vocab_size __magic_name__ : Any = hidden_size __magic_name__ : List[Any] = num_hidden_layers __magic_name__ : List[str] = num_attention_heads __magic_name__ : List[Any] = intermediate_size __magic_name__ : str = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : int = attention_probs_dropout_prob __magic_name__ : Optional[int] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Optional[int] = type_sequence_label_size __magic_name__ : int = initializer_range __magic_name__ : List[str] = num_labels __magic_name__ : str = num_choices __magic_name__ : Optional[int] = scope def __lowerCAmelCase ( self : Any ) -> Optional[Any]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : int = None if self.use_input_mask: __magic_name__ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : Dict = None if self.use_token_type_ids: __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : str = None __magic_name__ : Tuple = None if self.use_labels: __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Any = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , 
initializer_range=self.initializer_range , use_stable_embedding=_A , ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Any , _A : Any , _A : Optional[int] , _A : Tuple , _A : Optional[int] , _A : Any ) -> Tuple: __magic_name__ : List[Any] = OpenLlamaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : str = model(_A , attention_mask=_A ) __magic_name__ : str = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : Dict , _A : Optional[Any] , _A : int , _A : Optional[Any] , _A : Dict , _A : Tuple , _A : int , _A : Optional[Any] , _A : Any , _A : str , ) -> Optional[int]: __magic_name__ : Optional[int] = True __magic_name__ : List[Any] = OpenLlamaModel(_A ) model.to(_A ) model.eval() __magic_name__ : Tuple = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , ) __magic_name__ : Dict = model( _A , attention_mask=_A , encoder_hidden_states=_A , ) __magic_name__ : Union[str, Any] = model(_A , attention_mask=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : Tuple , _A : Optional[int] , _A : Dict , _A : str , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , ) -> Optional[Any]: __magic_name__ : Optional[Any] = OpenLlamaForCausalLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Any , _A : List[str] , _A : int , _A : Tuple , _A : Dict , _A : Dict , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Any , ) -> Any: __magic_name__ : Dict = True __magic_name__ : Optional[Any] = True __magic_name__ : Dict = OpenLlamaForCausalLM(config=_A ) model.to(_A ) model.eval() # first forward pass __magic_name__ : List[str] = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , ) __magic_name__ : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __magic_name__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) __magic_name__ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __magic_name__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) __magic_name__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 ) __magic_name__ : Union[str, Any] = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0] __magic_name__ : Any = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0] # select random slice __magic_name__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __magic_name__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() __magic_name__ : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3 ) ) def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : Dict = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : str = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) A_ : Optional[int] = (OpenLlamaForCausalLM,) if is_torch_available() else () A_ : Union[str, Any] = ( { """feature-extraction""": OpenLlamaModel, """text-classification""": OpenLlamaForSequenceClassification, """text-generation""": OpenLlamaForCausalLM, """zero-shot""": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) A_ : List[str] = False A_ : List[Any] = False def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : Optional[Any] = OpenLlamaModelTester(self ) __magic_name__ : Any = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : int ) -> Optional[int]: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> Any: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ : Optional[int] = type self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: __magic_name__ , __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Tuple = 3 __magic_name__ : Dict = input_dict['input_ids'] __magic_name__ : Optional[Any] = input_ids.ne(1 ).to(_A ) __magic_name__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ : Union[str, Any] = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : str = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: __magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Tuple = 3 __magic_name__ : Union[str, Any] = 'single_label_classification' __magic_name__ : int = input_dict['input_ids'] __magic_name__ : List[Any] = input_ids.ne(1 ).to(_A ) __magic_name__ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ : Dict = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self : List[str] ) -> Dict: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = 3 __magic_name__ : List[Any] = 'multi_label_classification' __magic_name__ : Tuple = input_dict['input_ids'] __magic_name__ : List[Any] = input_ids.ne(1 ).to(_A ) __magic_name__ : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) 
__magic_name__ : Tuple = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : List[Any] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: pass @parameterized.expand([('linear',), ('dynamic',)] ) def __lowerCAmelCase ( self : Union[str, Any] , _A : Any ) -> Tuple: __magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = ids_tensor([1, 10] , config.vocab_size ) __magic_name__ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ : List[Any] = OpenLlamaModel(_A ) original_model.to(_A ) original_model.eval() __magic_name__ : List[Any] = original_model(_A ).last_hidden_state __magic_name__ : List[str] = original_model(_A ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ : Dict = {'type': scaling_type, 'factor': 10.0} __magic_name__ : List[str] = OpenLlamaModel(_A ) scaled_model.to(_A ) scaled_model.eval() __magic_name__ : List[str] = scaled_model(_A ).last_hidden_state __magic_name__ : Tuple = scaled_model(_A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_A , _A , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) )
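# Hedged sketch of the RoPE-scaling setup the last test above drives. The small config values
# are hypothetical, and the top-level OpenLlama* imports are assumed available in this version.
import torch
from transformers import OpenLlamaConfig, OpenLlamaModel

config = OpenLlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=64, max_position_embeddings=64,
)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}
model = OpenLlamaModel(config).eval()
long_input = torch.randint(0, config.vocab_size, (1, int(config.max_position_embeddings * 1.5)))
with torch.no_grad():
    hidden = model(long_input).last_hidden_state  # positions beyond 64 are interpolated, not clipped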
331
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
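# Hedged usage sketch for the model family tested above; the checkpoint name comes from the
# integration test itself, the rest is the standard TF inference pattern.
from transformers import AutoTokenizer, TFConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
inputs = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768), cf. the [1, 6, 768] check above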
331
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
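# Sketch of what the _LazyModule wiring above buys: importing the package is cheap, and the
# heavy framework-specific submodules load only on first attribute access (illustrative).
import importlib

xglm = importlib.import_module("transformers.models.xglm")  # fast: no torch/tf/flax loaded yet
causal_lm_cls = getattr(xglm, "XGLMForCausalLM")  # first access imports the torch-backed submodule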
331
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
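# Minimal end-to-end sketch of the dataset-side FAISS API the tests above exercise
# (requires the faiss library; data values are illustrative).
import numpy as np
from datasets import Dataset

dset = Dataset.from_dict({"filename": [f"doc_{i}" for i in range(30)]})
dset = dset.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
dset.add_faiss_index(column="vecs")
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
print(examples["filename"])  # the row whose vector best matches the query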
331
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :List[str] = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[str] = [ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = [ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt' __magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , 'a+' ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __magic_name__ : Optional[Any] = metrics[key] if isinstance(_A , torch.Tensor ): __magic_name__ : Tuple = val.item() __magic_name__ : int = F'{key}: {val:.6f}\n' writer.write(_A ) if not save_generations: return if "preds" in metrics: __magic_name__ : str = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_A ) @rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple: try: __magic_name__ : str = pl_module.model.model.num_parameters() except AttributeError: __magic_name__ : List[str] = pl_module.model.num_parameters() __magic_name__ : List[Any] = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , 'test' ) @rank_zero_only def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
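# How these pieces are typically assembled upstream: in the un-obfuscated examples the two
# factory helpers are named get_checkpoint_callback and get_early_stopping_callback; the wiring
# below is an illustrative sketch, not code from this file.
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback("outputs", "rouge2")       # saves the best val_rouge2
early_stop_cb = get_early_stopping_callback("rouge2", patience=3)  # stops when val_rouge2 stalls
trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])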
331
1
'''simple docstring'''
import math


# Argument names below are recovered from the function bodies; the function names follow the
# upstream implementation (real_power / reactive_power), since the flattened original reused a
# single identifier for both functions and both arguments, which is not valid Python.
def real_power(apparent_power: float, power_factor: float):
    """simple docstring"""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float):
    """simple docstring"""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
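# Worked example for the two helpers above: at apparent power S = 100 VA and power factor 0.8,
# real power P = S * pf = 80.0 W and reactive power Q = S * sqrt(1 - 0.8**2) = 100 * 0.6 = 60.0 var.
print(real_power(100, 0.8))      # 80.0
print(reactive_power(100, 0.8))  # ~60.0 (exact up to floating-point rounding)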
331
'''simple docstring'''


# Function and parameter names restored from the call sites in the bodies themselves
# (two_pence, ..., solution); the flattened original gave every function the same name,
# which is not valid Python.
def one_pence():
    """simple docstring"""
    return 1


def two_pence(x: int):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200):
    """simple docstring"""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
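# Sanity checks for the recursion above: each helper counts combinations that use its own coin
# zero or more times and then delegates to the next smaller coin, so orderings are never
# double-counted.
assert five_pence(5) == 4      # 5 | 2+2+1 | 2+1+1+1 | 1+1+1+1+1
assert solution(200) == 73682  # Project Euler 31: ways to make 2 pounds from UK coins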
331
1
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration lowerCAmelCase :str = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] lowerCAmelCase :Tuple = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] lowerCAmelCase :Union[str, Any] = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) lowerCAmelCase :Tuple = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) lowerCAmelCase :Dict = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : int ): """simple docstring""" for tf_name, hf_name in patterns: __magic_name__ : Optional[int] = k.replace(lowerCAmelCase , lowerCAmelCase ) return k def lowerCamelCase ( lowerCAmelCase : dict , lowerCAmelCase : dict ): """simple docstring""" __magic_name__ : Tuple = BigBirdPegasusConfig(**lowerCAmelCase ) __magic_name__ : Tuple = BigBirdPegasusForConditionalGeneration(lowerCAmelCase ) __magic_name__ : Union[str, Any] = torch_model.state_dict() __magic_name__ : Union[str, Any] = {} # separating decoder weights __magic_name__ : str = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} __magic_name__ : List[str] = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): __magic_name__ : List[Any] = [k.endswith(lowerCAmelCase ) for ending in KEYS_TO_IGNORE] if any(lowerCAmelCase ): continue __magic_name__ : Optional[Any] = DECODER_PATTERNS __magic_name__ : Dict = rename_state_dict_key(lowerCAmelCase , lowerCAmelCase ) if new_k not in state_dict: raise ValueError(f'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): __magic_name__ : Union[str, Any] = v.T __magic_name__ : Any = torch.from_numpy(lowerCAmelCase ) assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): __magic_name__ : Dict = [k.endswith(lowerCAmelCase ) for ending in KEYS_TO_IGNORE] if any(lowerCAmelCase ): continue __magic_name__ : List[str] = REMAINING_PATTERNS __magic_name__ : List[str] = rename_state_dict_key(lowerCAmelCase , lowerCAmelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): __magic_name__ : int = v.T __magic_name__ : str = torch.from_numpy(lowerCAmelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __magic_name__ : List[str] = mapping['model.embed_positions.weight'] __magic_name__ : Optional[Any] = mapping.pop('model.embed_positions.weight' ) __magic_name__ , __magic_name__ : Optional[Any] = torch_model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) __magic_name__ : Tuple = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}' assert extra == [], f'no matches found for the following tf keys {extra}' return torch_model def lowerCamelCase ( lowerCAmelCase : List[str] ): """simple docstring""" __magic_name__ : int = tf.train.list_variables(lowerCAmelCase ) __magic_name__ : List[Any] = {} __magic_name__ : List[Any] = ['global_step'] for name, shape in tqdm(lowerCAmelCase , desc='converting tf checkpoint to dict' ): __magic_name__ : Dict = any(pat in name for pat in ignore_name ) if skip_key: continue __magic_name__ : Union[str, Any] = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : int = array return tf_weights def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : dict ): """simple docstring""" __magic_name__ : Union[str, Any] = get_tf_weights_as_numpy(lowerCAmelCase ) __magic_name__ : Dict = convert_bigbird_pegasus(lowerCAmelCase , lowerCAmelCase ) torch_model.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase :Any = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase :Tuple = parser.parse_args() lowerCAmelCase :Optional[Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
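# Typical invocation of the conversion script above (the script filename and the paths are
# placeholders):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird-pegasus-pt
#
# The TF checkpoint is enumerated with tf.train.list_variables, each weight is renamed through
# the pattern tables (with dense/query/key/value kernels transposed), and the converted
# BigBirdPegasusForConditionalGeneration is written out via save_pretrained.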
331
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
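# Illustrative effect of these stubs (the pipeline name is an example): when flax or
# transformers is missing, the import still succeeds because diffusers substitutes the dummy
# class, but any instantiation raises an ImportError naming the missing backends.
from diffusers import FlaxStableDiffusionPipeline  # resolves to a stub if flax is absent

pipe = FlaxStableDiffusionPipeline()  # -> ImportError: ... requires the flax library ...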
331
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
331
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
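# Hedged usage sketch for the pipeline defined above; the checkpoint is an illustrative choice.
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
print(captioner(url))                       # e.g. [{'generated_text': 'two parrots ...'}]
print(captioner(url, prompt="a photo of"))  # GIT/Pix2Struct-style models accept a text prompt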
331
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __lowerCAmelCase ( self : Dict ) -> List[str]: torch.manual_seed(0 ) __magic_name__ : Union[str, Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : List[Any] = self.dummy_uncond_unet __magic_name__ : Tuple = ScoreSdeVeScheduler() __magic_name__ : List[str] = ScoreSdeVePipeline(unet=_A , scheduler=_A ) sde_ve.to(_A ) sde_ve.set_progress_bar_config(disable=_A ) __magic_name__ : Tuple = torch.manual_seed(0 ) __magic_name__ : Optional[int] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_A ).images __magic_name__ : Optional[int] = torch.manual_seed(0 ) __magic_name__ : List[Any] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_A , return_dict=_A )[ 0 ] __magic_name__ : Optional[int] = image[0, -3:, -3:, -1] __magic_name__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __magic_name__ : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Dict ) -> str: __magic_name__ : List[Any] = 'google/ncsnpp-church-256' __magic_name__ : str = UNetaDModel.from_pretrained(_A ) __magic_name__ : List[str] = ScoreSdeVeScheduler.from_pretrained(_A ) __magic_name__ : Tuple = ScoreSdeVePipeline(unet=_A , scheduler=_A ) sde_ve.to(_A ) sde_ve.set_progress_bar_config(disable=_A ) __magic_name__ : Any = torch.manual_seed(0 ) __magic_name__ : str = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=_A ).images __magic_name__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __magic_name__ : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
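# Hedged sketch of running the pipeline outside the test, assuming the hub repo packages the
# full unet+scheduler pipeline as the slow test suggests:
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]
image.save("sde_ve_sample.png")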
331
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check these named parameters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning, accelerators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
331
1
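The configure_optimizers logic in the code above splits parameters into a weight-decay group and a no-decay group keyed on name substrings ('bias', 'LayerNorm.weight'). A self-contained sketch of that pattern follows; the module and hyperparameter values here are illustrative, not taken from the code above:

import torch
from torch import nn

class TinyBlock(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)  # attribute named so its parameters match the filter below

model = TinyBlock()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {   # regular weights: apply L2 weight decay
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {   # biases and LayerNorm weights: exempt from decay
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)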
'''simple docstring''' from __future__ import annotations from cmath import sqrt def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) __magic_name__ : Any = b * b - 4 * a * c __magic_name__ : str = (-b + sqrt(lowerCAmelCase )) / (2 * a) __magic_name__ : Dict = (-b - sqrt(lowerCAmelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Any = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
331
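Because the style-obfuscation in the code above assigns both roots to the same placeholder name, the printed f-string references one variable twice. A minimal sketch of the same quadratic-formula routine with the two roots kept distinct (names here are illustrative):

from cmath import sqrt

def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c              # discriminant
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # return plain floats when the imaginary part vanishes
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )

print(quadratic_roots(a=5, b=6, c=1))  # (-0.2, -1.0)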
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
331
1
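The variance values asserted in the scheduler test above follow from the DDPM posterior variance used by the 'fixed_small' option. As background (standard DDPM math from Ho et al., 2020, stated here for orientation, not taken from the code): with $\alpha_t = 1 - \beta_t$ and $\bar{\alpha}_t = \prod_{s \le t} \alpha_s$,

$$\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t,$$

which is $0$ at $t = 0$ and approaches $\beta_t$ (here $\beta_{999} = 0.02$ under the linear schedule) as $\bar{\alpha}_t \to 0$, matching the three asserted values.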
'''simple docstring''' def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def lowerCamelCase ( ): """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
331
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
331
1
'''simple docstring''' import os # Precomputes a list of the 100 first triangular numbers lowerCAmelCase :Union[str, Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[Any] = os.path.dirname(os.path.realpath(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'words.txt' ) __magic_name__ : List[str] = '' with open(lowerCAmelCase ) as f: __magic_name__ : Dict = f.readline() __magic_name__ : Dict = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] __magic_name__ : Any = [ word for word in [sum(ord(lowerCAmelCase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(lowerCAmelCase ) if __name__ == "__main__": print(solution())
331
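The precomputed list in the code above suffices because word values are small, but triangularity also has a closed-form test: $T$ is triangular exactly when $8T + 1$ is a perfect square, since $T = n(n+1)/2$ gives $8T + 1 = (2n+1)^2$. A small sketch of that direct check (illustrative, not part of the code above):

from math import isqrt

def is_triangular(t: int) -> bool:
    s = 8 * t + 1
    r = isqrt(s)
    return r * r == s

print(is_triangular(55))  # True: 55 = 10 * 11 / 2
print(is_triangular(56))  # False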
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
331
1
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowerCAmelCase :List[Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : str=2 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Optional[int]=16 , lowerCAmelCase : int = 10 , lowerCAmelCase : int = 2 ): """simple docstring""" def get_dataset(lowerCAmelCase : Any ): __magic_name__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(lowerCAmelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __magic_name__ : Optional[int] = get_dataset(lowerCAmelCase ) __magic_name__ : List[str] = get_dataset(lowerCAmelCase ) __magic_name__ : List[Any] = DataLoader(lowerCAmelCase , shuffle=lowerCAmelCase , batch_size=lowerCAmelCase , num_workers=4 ) __magic_name__ : Optional[int] = DataLoader(lowerCAmelCase , shuffle=lowerCAmelCase , batch_size=lowerCAmelCase , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : str=None ): """simple docstring""" __magic_name__ : Optional[int] = [] for epoch in range(lowerCAmelCase ): # Train quickly model.train() for batch in dataloader: __magic_name__ , __magic_name__ : int = batch __magic_name__ : Optional[Any] = model(lowerCAmelCase ) __magic_name__ : Tuple = torch.nn.functional.mse_loss(lowerCAmelCase , lowerCAmelCase ) accelerator.backward(lowerCAmelCase ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : str ) -> Optional[Any]: super().__init__() __magic_name__ : Optional[int] = nn.Parameter(torch.randn(1 ) ) __magic_name__ : Union[str, Any] = nn.Parameter(torch.randn(1 ) ) def __lowerCAmelCase ( self : List[Any] , _A : Optional[int] ) -> List[Any]: return x * self.a + self.b class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : str ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __magic_name__ : List[str] = DummyModel() __magic_name__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ , __magic_name__ : List[str] = dummy_dataloaders() __magic_name__ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A ) # Train baseline __magic_name__ : Optional[int] = Accelerator(project_config=_A ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __magic_name__ : int = DummyModel() __magic_name__ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ , __magic_name__ : Tuple = dummy_dataloaders() # Train baseline __magic_name__ : List[str] = 
Accelerator() __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[str] = accelerator.prepare( _A , _A , _A , _A ) # Save initial __magic_name__ : Optional[int] = os.path.join(_A , 'initial' ) accelerator.save_state(_A ) ((__magic_name__) , (__magic_name__)) : int = model.a.item(), model.b.item() __magic_name__ : List[Any] = optimizer.state_dict() __magic_name__ : List[Any] = train(3 , _A , _A , _A , _A ) ((__magic_name__) , (__magic_name__)) : List[str] = model.a.item(), model.b.item() __magic_name__ : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) __magic_name__ : List[str] = DummyModel() __magic_name__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ , __magic_name__ : Any = dummy_dataloaders() __magic_name__ : str = Accelerator() __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(_A ) ((__magic_name__) , (__magic_name__)) : Tuple = model.a.item(), model.b.item() __magic_name__ : Optional[int] = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __magic_name__ : str = train(2 , _A , _A , _A , _A ) # Save everything __magic_name__ : Union[str, Any] = os.path.join(_A , 'checkpoint' ) accelerator.save_state(_A ) # Load everything back in and make sure all states work accelerator.load_state(_A ) test_rands += train(1 , _A , _A , _A , _A ) ((__magic_name__) , (__magic_name__)) : Optional[Any] = model.a.item(), model.b.item() __magic_name__ : List[str] = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Any ) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __magic_name__ : Tuple = DummyModel() __magic_name__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ , __magic_name__ : str = dummy_dataloaders() __magic_name__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __magic_name__ : Dict = Accelerator(project_dir=_A , project_config=_A ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[str] = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() ((__magic_name__) , (__magic_name__)) : List[str] = model.a.item(), model.b.item() __magic_name__ : Optional[Any] = optimizer.state_dict() __magic_name__ : List[str] = train(3 , _A , _A , _A , _A ) ((__magic_name__) , (__magic_name__)) : str = model.a.item(), model.b.item() __magic_name__ : int = optimizer.state_dict() # Train partially set_seed(42 ) __magic_name__ : Tuple = DummyModel() __magic_name__ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ , __magic_name__ : List[str] = dummy_dataloaders() __magic_name__ : Optional[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A ) __magic_name__ : List[str] = Accelerator(project_dir=_A , project_config=_A ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(os.path.join(_A , 'checkpoints' , 'checkpoint_0' ) ) ((__magic_name__) , (__magic_name__)) : Dict = model.a.item(), model.b.item() __magic_name__ : Optional[Any] = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __magic_name__ : str = train(2 , _A , _A , _A , _A ) # Save everything accelerator.save_state() 
# Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _A , _A , _A , _A ) ((__magic_name__) , (__magic_name__)) : int = model.a.item(), model.b.item() __magic_name__ : int = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: __magic_name__ : List[str] = torch.tensor([1, 2, 3] ) __magic_name__ : str = torch.tensor([2, 3, 4] ) __magic_name__ : Union[str, Any] = DummyModel() __magic_name__ : Tuple = torch.optim.Adam(net.parameters() ) __magic_name__ : Tuple = Accelerator() with self.assertRaises(_A ) as ve: accelerator.register_for_checkpointing(_A , _A , _A , _A ) __magic_name__ : Union[str, Any] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __lowerCAmelCase ( self : int ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __magic_name__ : Tuple = DummyModel() __magic_name__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __magic_name__ : Any = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 ) __magic_name__ , __magic_name__ : Union[str, Any] = dummy_dataloaders() __magic_name__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __magic_name__ : List[Any] = Accelerator(project_dir=_A , project_config=_A ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = accelerator.prepare( _A , _A , _A , _A , _A ) # Save initial accelerator.save_state() __magic_name__ : Tuple = scheduler.state_dict() train(3 , _A , _A , _A , _A , _A ) self.assertNotEqual(_A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_A , scheduler.state_dict() ) def __lowerCAmelCase ( self : Union[str, Any] ) -> str: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __magic_name__ : int = DummyModel() __magic_name__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 ) # Train baseline __magic_name__ : int = Accelerator(project_dir=_A , project_config=_A ) __magic_name__ : List[str] = accelerator.prepare(_A ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_A , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __lowerCAmelCase ( self : Dict ) -> List[Any]: __magic_name__ : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(_A , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase :Any = '''/tmp/accelerate/state_checkpointing''' lowerCAmelCase :Union[str, Any] = DummyModel() lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3) lowerCAmelCase :Any = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowerCAmelCase , lowerCAmelCase :Optional[Any] = dummy_dataloaders() lowerCAmelCase :List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train 
baseline lowerCAmelCase :Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :List[Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowerCAmelCase , lowerCAmelCase :Union[str, Any] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: lowerCAmelCase :List[Any] = group['''params'''][0].device break assert param_device.type == accelerator.device.type lowerCAmelCase :int = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''') for group in optimizer.param_groups: lowerCAmelCase :str = group['''params'''][0].device break assert ( param_device.type == torch.device('''cpu''').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''') for group in optimizer.param_groups: lowerCAmelCase :Union[str, Any] = group['''params'''][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''): accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
331
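The tests above exercise `Accelerator.save_state` and `load_state`; a minimal usage sketch of that round trip is below. The model, learning rate, and checkpoint directory name are illustrative assumptions, and exact behavior may vary across accelerate versions:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")  # writes model, optimizer, and RNG states to the directory
accelerator.load_state("ckpt")  # restores them in place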
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: __magic_name__ : Tuple = row __magic_name__ : str = col __magic_name__ : Optional[Any] = graph def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: # Checking all 8 elements surrounding nth element __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : Optional[int] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A ) def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : Any = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_A , _A , _A ) count += 1 return count
331
1
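The class in the code above counts 8-connected islands with a DFS flood fill. A compact standalone sketch of the same idea (function and variable names are illustrative):

def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i: int, j: int) -> None:
        # stop at the border, at visited cells, and at water
        if not (0 <= i < rows and 0 <= j < cols) or seen[i][j] or not grid[i][j]:
            return
        seen[i][j] = True
        for di in (-1, 0, 1):          # visit all 8 neighbors
            for dj in (-1, 0, 1):
                if di or dj:
                    dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)              # flood-fill one island
                count += 1
    return count

print(count_islands([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))  # 2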
'''simple docstring''' from collections import deque class _lowerCamelCase : '''simple docstring''' def __init__( self : List[str] , _A : str , _A : int , _A : int ) -> None: __magic_name__ : Optional[Any] = process_name # process name __magic_name__ : List[Any] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __magic_name__ : Tuple = arrival_time __magic_name__ : Optional[Any] = burst_time # remaining burst time __magic_name__ : Union[str, Any] = 0 # total time of the process wait in ready queue __magic_name__ : Dict = 0 # time from arrival time to completion time class _lowerCamelCase : '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : list[int] , _A : deque[Process] , _A : int , ) -> None: # total number of mlfq's queues __magic_name__ : List[Any] = number_of_queues # time slice of queues that round robin algorithm applied __magic_name__ : str = time_slices # unfinished process is in this ready_queue __magic_name__ : int = queue # current time __magic_name__ : Tuple = current_time # finished process is in this sequence queue __magic_name__ : deque[Process] = deque() def __lowerCAmelCase ( self : int ) -> list[str]: __magic_name__ : List[str] = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __lowerCAmelCase ( self : Optional[int] , _A : list[Process] ) -> list[int]: __magic_name__ : List[str] = [] for i in range(len(_A ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __lowerCAmelCase ( self : Tuple , _A : list[Process] ) -> list[int]: __magic_name__ : str = [] for i in range(len(_A ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __lowerCAmelCase ( self : List[str] , _A : list[Process] ) -> list[int]: __magic_name__ : int = [] for i in range(len(_A ) ): completion_times.append(queue[i].stop_time ) return completion_times def __lowerCAmelCase ( self : Any , _A : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def __lowerCAmelCase ( self : Dict , _A : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __lowerCAmelCase ( self : List[str] , _A : deque[Process] ) -> deque[Process]: __magic_name__ : deque[Process] = deque() # sequence deque of finished process while len(_A ) != 0: __magic_name__ : Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_A ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __magic_name__ : Optional[Any] = 0 # set the process's turnaround time because it is finished __magic_name__ : Union[str, Any] = self.current_time - cp.arrival_time # set the completion time __magic_name__ : Union[str, Any] = self.current_time # add the process to queue that has finished queue finished.append(_A ) self.finish_queue.extend(_A ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __lowerCAmelCase ( self : Any , _A : deque[Process] , _A : int ) -> tuple[deque[Process], deque[Process]]: __magic_name__ : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_A ) ): 
__magic_name__ : str = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_A ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __magic_name__ : Dict = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_A ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __magic_name__ : int = 0 # set the finish time __magic_name__ : Optional[int] = self.current_time # update the process' turnaround time because it is finished __magic_name__ : List[str] = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_A ) self.finish_queue.extend(_A ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __lowerCAmelCase ( self : Optional[Any] ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __magic_name__ , __magic_name__ : Any = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase :Dict = Process('''P1''', 0, 5_3) lowerCAmelCase :Dict = Process('''P2''', 0, 1_7) lowerCAmelCase :Dict = Process('''P3''', 0, 6_8) lowerCAmelCase :Any = Process('''P4''', 0, 2_4) lowerCAmelCase :Optional[int] = 3 lowerCAmelCase :Optional[int] = [1_7, 2_5] lowerCAmelCase :Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase :int = Process('''P1''', 0, 5_3) lowerCAmelCase :int = Process('''P2''', 0, 1_7) lowerCAmelCase :Any = Process('''P3''', 0, 6_8) lowerCAmelCase :List[str] = Process('''P4''', 0, 2_4) lowerCAmelCase :Dict = 3 lowerCAmelCase :Union[str, Any] = [1_7, 2_5] lowerCAmelCase :Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase :Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase :str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
331
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
1
'''simple docstring''' def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(lowerCAmelCase , int(b / 2 ) ) * actual_power(lowerCAmelCase , int(b / 2 ) ) else: return a * actual_power(lowerCAmelCase , int(b / 2 ) ) * actual_power(lowerCAmelCase , int(b / 2 ) ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" if b < 0: return 1 / actual_power(lowerCAmelCase , lowerCAmelCase ) return actual_power(lowerCAmelCase , lowerCAmelCase ) if __name__ == "__main__": print(power(-2, -3))
331
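Note that `actual_power` in the code above calls itself twice on `b / 2` per level, so it recomputes each half and still performs O(b) multiplications. Computing the half once restores the usual O(log b) fast-exponentiation bound; a sketch (names are illustrative):

def fast_power(a: float, b: int) -> float:
    # O(log b) multiplications: each half is computed exactly once
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half

print(fast_power(-2, -3))  # -0.125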
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ): """simple docstring""" __magic_name__ : list[int] = [0] __magic_name__ : int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __magic_name__ : int = 0 # the area corresponding to the grid that gives the product closest to target __magic_name__ : int = 0 # an estimate of b, using the quadratic formula __magic_name__ : float # the largest integer less than b_estimate __magic_name__ : int # the smallest integer greater than b_estimate __magic_name__ : int # the triangle number corresponding to b_floor __magic_name__ : int # the triangle number corresponding to b_ceil __magic_name__ : int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __magic_name__ : List[Any] = floor(lowerCAmelCase ) __magic_name__ : Dict = ceil(lowerCAmelCase ) __magic_name__ : Any = triangle_numbers[b_floor] __magic_name__ : Optional[int] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __magic_name__ : Any = triangle_b_first_guess * triangle_a __magic_name__ : Any = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __magic_name__ : List[str] = triangle_b_second_guess * triangle_a __magic_name__ : Optional[int] = idx_a * b_ceil return area if __name__ == "__main__": print(F'{solution() = }')
331
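The `b_estimate` expression in the code above comes from solving $t_a \cdot t_b \approx \text{target}$ for $b$, with $t_b = b(b+1)/2$. Rearranging to $b^2 + b - 2\,\text{target}/t_a = 0$ and applying the quadratic formula gives

$$b \approx \frac{-1 + \sqrt{1 + 8\,\text{target}/t_a}}{2},$$

and the code then tests the two nearest integers, `b_floor` and `b_ceil`, keeping whichever product lands closest to the target.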
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :Optional[int] = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowerCAmelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
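The lazy `_import_structure` above keeps `import transformers` cheap: `modeling_deberta` and its siblings are only imported when one of their attributes is first accessed. A hedged usage sketch, assuming the `transformers` package is installed:

from transformers import DebertaConfig, DebertaModel  # resolved through _LazyModule

# a small randomly initialized model; first attribute access triggers the import
config = DebertaConfig(hidden_size=384, num_hidden_layers=4, num_attention_heads=6)
model = DebertaModel(config)
print(model.config.model_type)  # "deberta"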
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ): """simple docstring""" assert isinstance(lowerCAmelCase , lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" __magic_name__ : Any = tmp_path / 'cache' __magic_name__ : Union[str, Any] = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ : Dict = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read() _check_text_dataset(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : Tuple = tmp_path / 'cache' __magic_name__ : Optional[Any] = {'text': 'string'} __magic_name__ : List[str] = features.copy() if features else default_expected_features __magic_name__ : Any = ( Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ : Dict = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read() _check_text_dataset(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] ): """simple docstring""" __magic_name__ : List[Any] = tmp_path / 'cache' __magic_name__ : int = {'text': 'string'} __magic_name__ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read() _check_text_dataset(lowerCAmelCase , lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ): """simple docstring""" if issubclass(lowerCAmelCase , lowerCAmelCase ): __magic_name__ : Optional[Any] = text_path elif issubclass(lowerCAmelCase , lowerCAmelCase ): __magic_name__ : Union[str, Any] = [text_path] __magic_name__ : Optional[Any] = tmp_path / 'cache' __magic_name__ : Tuple = {'text': 'string'} __magic_name__ : Any = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read() _check_text_dataset(lowerCAmelCase , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str]=("train",) ): """simple docstring""" assert isinstance(lowerCAmelCase , lowerCAmelCase ) for split in splits: __magic_name__ : Tuple = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == 
expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ): """simple docstring""" __magic_name__ : Optional[Any] = tmp_path / 'cache' __magic_name__ : List[Any] = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ : str = TextDatasetReader({'train': text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read() _check_text_datasetdict(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict ): """simple docstring""" __magic_name__ : Union[str, Any] = tmp_path / 'cache' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ : Union[str, Any] = {'text': 'string'} __magic_name__ : Optional[Any] = features.copy() if features else default_expected_features __magic_name__ : str = ( Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ : List[Any] = TextDatasetReader({'train': text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read() _check_text_datasetdict(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] ): """simple docstring""" if split: __magic_name__ : List[Any] = {split: text_path} else: __magic_name__ : Optional[int] = 'train' __magic_name__ : Optional[Any] = {'train': text_path, 'test': text_path} __magic_name__ : Any = tmp_path / 'cache' __magic_name__ : Union[str, Any] = {'text': 'string'} __magic_name__ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read() _check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
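`TextDatasetReader`, exercised by the tests above, is the backend of the public `load_dataset("text", ...)` entry point. A hedged sketch of that public path (the file name is a placeholder):

from datasets import load_dataset

# one example per line; the single column is always named "text"
dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(dataset.column_names)            # ["text"]
print(dataset.features["text"].dtype)  # "string"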
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
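The resize logic above (scale-then-center-crop below 384 pixels, plain warping at 384 and above, controlled by `crop_pct`) matches the ConvNeXT-style preprocessing in transformers. A hedged sketch of calling such a processor on a dummy image; the concrete class name is an assumption about which processor this file defines:

import numpy as np
from PIL import Image
from transformers import ConvNextImageProcessor  # assumed concrete class

processor = ConvNextImageProcessor(size={"shortest_edge": 384})
image = Image.fromarray(np.random.randint(0, 256, (512, 640, 3), dtype=np.uint8))
inputs = processor(image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 384, 384): warped, rescaled, normalized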
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : str , _A : Dict[str, int] , _A : List[str] , _A : int = None , _A : int = None ) -> Dict: super().__init__() __magic_name__ : str = pad_token_id __magic_name__ : Any = max_length __magic_name__ : Tuple = vocab __magic_name__ : List[Any] = merges __magic_name__ : str = BytePairTokenizer(_A , _A , sequence_length=_A ) @classmethod def __lowerCAmelCase ( cls : Dict , _A : GPTaTokenizer , *_A : Optional[int] , **_A : int ) -> Any: __magic_name__ : Tuple = [' '.join(_A ) for m in tokenizer.bpe_ranks.keys()] __magic_name__ : Dict = tokenizer.get_vocab() return cls(_A , _A , *_A , **_A ) @classmethod def __lowerCAmelCase ( cls : Tuple , _A : Union[str, os.PathLike] , *_A : Any , **_A : List[str] ) -> Optional[int]: __magic_name__ : Union[str, Any] = GPTaTokenizer.from_pretrained(_A , *_A , **_A ) return cls.from_tokenizer(_A , *_A , **_A ) @classmethod def __lowerCAmelCase ( cls : int , _A : Dict ) -> Tuple: return cls(**_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def __lowerCAmelCase ( self : Dict , _A : str , _A : int = None ) -> Union[str, Any]: __magic_name__ : Any = self.tf_tokenizer(_A ) __magic_name__ : Any = tf.ones_like(_A ) if self.pad_token_id is not None: # pad the tokens up to max length __magic_name__ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: __magic_name__ , __magic_name__ : List[Any] = pad_model_inputs( _A , max_seq_length=_A , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
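A hedged usage sketch of the Keras layer above: it runs GPT-2 byte-pair encoding inside the TensorFlow graph, so tokenization can be saved as part of a model. Assumes `tensorflow`, `keras-nlp`, and `tensorflow-text` are installed and that the layer is exported as `TFGPT2Tokenizer`:

import tensorflow as tf
from transformers import TFGPT2Tokenizer  # assumed public name of the layer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["hello world", "a longer second example"]))
print(outputs["input_ids"])       # ragged token ids, one row per input string
print(outputs["attention_mask"])  # ones over the real tokens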
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function lowerCAmelCase :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s lowerCAmelCase :Union[str, Any] = 3E8 # unit of c : m * s^-1 def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: __magic_name__ : Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: __magic_name__ : Optional[int] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __magic_name__ : Union[str, Any] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
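The function above solves F = (ħ c π² A) / (240 d⁴) for whichever of force, area, or distance is passed as zero. A self-contained sketch of the forward computation in SI units (the plate geometry is illustrative):

from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8                       # m / s

def casimir_force(area: float, distance: float) -> float:
    """Attractive Casimir force between two ideal parallel plates."""
    return (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
        240 * distance**4
    )

# two 2 cm x 2 cm plates separated by one micrometre
print(f"{casimir_force(area=4e-4, distance=1e-6):.3e} N")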
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
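The `FaissIndex` wrapper tested above is a thin layer over faiss itself; the tests build inner-product indexes, add vectors, and query them. A hedged sketch of the underlying raw calls (requires the `faiss` package; the data is illustrative):

import faiss
import numpy as np

index = faiss.IndexFlatIP(5)            # exact inner-product index over 5-dim vectors
index.add(np.eye(5, dtype=np.float32))  # five one-hot vectors
query = np.ones((1, 5), dtype=np.float32)
scores, ids = index.search(query, 3)    # top-3 neighbours: (scores, ids) per query
print(ids[0], scores[0])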
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
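A hedged sketch of the `PokerHand` interface the tests above exercise. The file imports it from a relative module (`.sola`), so a flat import is assumed here purely for illustration:

from sola import PokerHand  # hypothetical import path for the class under test

royal_flush = PokerHand("KS AS TS QS JS")
straight_flush = PokerHand("2H 3H 4H 5H 6H")
print(royal_flush.compare_with(straight_flush))  # "Win"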
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig lowerCAmelCase :int = logging.get_logger(__name__) lowerCAmelCase :Dict = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Optional[int] = """dpt""" def __init__( self : Optional[Any] , _A : Dict=768 , _A : Union[str, Any]=12 , _A : List[str]=12 , _A : int=3072 , _A : str="gelu" , _A : Tuple=0.0 , _A : Tuple=0.0 , _A : Any=0.02 , _A : List[str]=1E-12 , _A : Optional[Any]=384 , _A : Union[str, Any]=16 , _A : Optional[int]=3 , _A : str=False , _A : Dict=True , _A : int=[2, 5, 8, 11] , _A : str="project" , _A : List[str]=[4, 2, 1, 0.5] , _A : Union[str, Any]=[96, 192, 384, 768] , _A : Optional[Any]=256 , _A : List[str]=-1 , _A : Optional[Any]=False , _A : Dict=True , _A : Any=0.4 , _A : List[Any]=255 , _A : Union[str, Any]=0.1 , _A : Any=[1, 1024, 24, 24] , _A : List[str]=[0, 1] , _A : List[Any]=None , **_A : Optional[int] , ) -> Tuple: super().__init__(**_A ) __magic_name__ : Tuple = hidden_size __magic_name__ : Dict = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) __magic_name__ : str = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } __magic_name__ : Optional[Any] = BitConfig(**_A ) elif isinstance(_A , _A ): logger.info('Initializing the config with a `BiT` backbone.' ) __magic_name__ : str = BitConfig(**_A ) elif isinstance(_A , _A ): __magic_name__ : Tuple = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) __magic_name__ : Optional[Any] = backbone_featmap_shape __magic_name__ : Tuple = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' 
) else: __magic_name__ : str = None __magic_name__ : Union[str, Any] = None __magic_name__ : str = [] __magic_name__ : Optional[int] = num_hidden_layers __magic_name__ : Tuple = num_attention_heads __magic_name__ : Optional[int] = intermediate_size __magic_name__ : Optional[int] = hidden_act __magic_name__ : Optional[int] = hidden_dropout_prob __magic_name__ : Any = attention_probs_dropout_prob __magic_name__ : Optional[int] = initializer_range __magic_name__ : Any = layer_norm_eps __magic_name__ : int = image_size __magic_name__ : Any = patch_size __magic_name__ : Dict = num_channels __magic_name__ : Optional[Any] = qkv_bias __magic_name__ : Union[str, Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) __magic_name__ : str = readout_type __magic_name__ : str = reassemble_factors __magic_name__ : List[Any] = neck_hidden_sizes __magic_name__ : Any = fusion_hidden_size __magic_name__ : List[Any] = head_in_index __magic_name__ : Dict = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __magic_name__ : Any = use_auxiliary_head __magic_name__ : Optional[Any] = auxiliary_loss_weight __magic_name__ : int = semantic_loss_ignore_index __magic_name__ : int = semantic_classifier_dropout def __lowerCAmelCase ( self : Dict ) -> Any: __magic_name__ : Dict = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __magic_name__ : Any = self.backbone_config.to_dict() __magic_name__ : Tuple = self.__class__.model_type return output
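A hedged sketch of instantiating the configuration above in both modes; with `is_hybrid=True` and no explicit backbone, the constructor falls back to the default BiT backbone described in the code:

from transformers import DPTConfig

plain_vit = DPTConfig()                   # pure ViT backbone, readout_type="project"
hybrid = DPTConfig(is_hybrid=True)        # builds the default BiT backbone config
print(plain_vit.model_type)               # "dpt"
print(hybrid.backbone_config.layer_type)  # "bottleneck"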
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
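A hedged usage sketch of the zero-shot image-classification pipeline above; the checkpoint name and image URL are illustrative choices, not mandated by the code:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "a plane"],
)
print(preds[0]["label"], round(preds[0]["score"], 3))  # best label first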
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase :Dict = { '''configuration_blip_2''': [ '''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Blip2Config''', '''Blip2QFormerConfig''', '''Blip2VisionConfig''', ], '''processing_blip_2''': ['''Blip2Processor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = [ '''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Blip2Model''', '''Blip2QFormerModel''', '''Blip2PreTrainedModel''', '''Blip2ForConditionalGeneration''', '''Blip2VisionModel''', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys lowerCAmelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
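# The hard-coded id lists in the tests above follow ByT5's byte-level
# scheme: ids 0/1/2 are reserved for pad/eos/unk, so each UTF-8 byte b maps
# to id b + 3 and every encoded string ends with the eos id 1. A minimal
# sketch of that mapping (an illustration, not the real ByT5Tokenizer):
def byte_encode(text: str, add_eos: bool = True) -> list:
    ids = [b + 3 for b in text.encode('utf-8')]
    return ids + [1] if add_eos else ids

# Reproduces the 'Unicode €.' expectation used in the test above.
assert byte_encode('Unicode €.') == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]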
'''simple docstring'''

from scipy.stats import pearsonr

import datasets


_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
'''

_CITATION = '''
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, Ilhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Antonio H.
and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
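# Since _compute is a thin wrapper around scipy.stats.pearsonr, the
# docstring example can be verified with scipy alone:
from scipy.stats import pearsonr

r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(r, 2), round(p, 2))  # -0.74 0.15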
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
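# The behaviour pinned down by the tests above can be sketched in a few
# lines: advance through whichever candidate sequences still match the
# generated prefix and report completion once one candidate is fully
# generated. This is a simplified stand-in, not the real trie-based
# DisjunctiveConstraint implementation.
def advance(candidates, prefix, token):
    new_prefix = prefix + [token]
    n = len(new_prefix)
    alive = [c for c in candidates if c[:n] == new_prefix]
    completed = any(len(c) == n for c in alive)
    return (new_prefix if alive else []), completed

prefix, done = [], False
for token in (1, 2, 4, 5):
    prefix, done = advance([[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]], prefix, token)
print(done, prefix)  # True [1, 2, 4, 5]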
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
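# Minimal usage sketch, assuming the public transformers export
# MegatronBertConfig corresponds to the class above:
from transformers import MegatronBertConfig

cfg = MegatronBertConfig(num_hidden_layers=2)  # override one default
print(cfg.hidden_size, cfg.num_hidden_layers)  # 1024 2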
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
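# All of the error strings above hinge on how the validator splits a card
# into YAML front matter and a markdown body. A self-contained sketch of
# that split; the regex and helper name are illustrative, not
# datasets.utils.readme internals.
import re
import yaml

def split_front_matter(md):
    match = re.match(r'^---\n(.*?)\n---\n(.*)$', md, re.DOTALL)
    if match is None:
        return None, md  # no YAML markers present
    return yaml.safe_load(match.group(1)), match.group(2)

meta, body = split_front_matter('---\nlanguage:\n- en\n---\n# Dataset Card for X\n')
print(meta)  # {'language': ['en']}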
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
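# Standalone reference for the _is_flush cases in the first table above
# (a sketch, not the PokerHand method itself): a hand is a flush when all
# five cards share one suit character.
def is_flush(hand):
    return len({card[1] for card in hand.split()}) == 1

print(is_flush('2H 3H 4H 5H 6H'))  # True
print(is_flush('AS AH 2H AD AC'))  # False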
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
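# The num_attention_heads / 2 in the attention-shape assertions above comes
# from ConvBERT's head ratio: half of the self-attention heads are replaced
# by the span-based convolution branch. A config-level check, assuming only
# that ConvBertConfig exposes head_ratio as in the public transformers API:
from transformers import ConvBertConfig

cfg = ConvBertConfig(num_attention_heads=4, head_ratio=2)
print(cfg.num_attention_heads // cfg.head_ratio)  # 2 self-attention heads remain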
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union

# Alias names restored (they match the upstream `datasets` typing module) so that
# `T` actually resolves in the aliases below; the obfuscated dump bound all four
# objects to the same name, which made the module unusable.
T = TypeVar('''T''')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
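# Illustrative sketch (not part of the original module; assumes the restored
# alias names above): `flatten_paths` is a hypothetical helper added only to
# show the aliases used in a signature.
def flatten_paths(paths: NestedDataStructureLike[PathLike]) -> List[PathLike]:
    # Accept one path, a list/tuple of paths, or a dict of named paths.
    if isinstance(paths, dict):
        return list(paths.values())
    if isinstance(paths, (list, tuple)):
        return list(paths)
    return [paths]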
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
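# A minimal usage sketch distilled from the tests above (assumes `faiss`,
# `numpy` and `datasets` are installed); the obfuscated `np.floataa` in this
# dump corresponds to `np.float32`.
import numpy as np
import faiss
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five orthogonal unit vectors
scores, indices = index.search(np.ones(5, dtype=np.float32))
print(scores[0], indices[0])  # best inner-product match among the added vectors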
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCAmelCase :Optional[Any] = logging.get_logger(__name__) lowerCAmelCase :Optional[int] = OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', '''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), 
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) lowerCAmelCase :Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCamelCase ( lowerCAmelCase : str ): """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __magic_name__ : int = model_type_to_module_name(lowerCAmelCase ) __magic_name__ : str = importlib.import_module(f'.{module_name}' , 'transformers.models' ) try: return getattr(lowerCAmelCase , lowerCAmelCase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(lowerCAmelCase , '__name__' , lowerCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __magic_name__ : List[str] = importlib.import_module('transformers' ) if hasattr(lowerCAmelCase , lowerCAmelCase ): return getattr(lowerCAmelCase , lowerCAmelCase ) return None def lowerCamelCase ( lowerCAmelCase : Union[str, os.PathLike] , lowerCAmelCase : Optional[Union[str, os.PathLike]] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[Dict[str, str]] = None , lowerCAmelCase : Optional[Union[bool, str]] = None , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : bool = False , **lowerCAmelCase : Optional[Any] , ): """simple docstring""" __magic_name__ : str = get_file_from_repo( lowerCAmelCase , lowerCAmelCase , cache_dir=lowerCAmelCase , force_download=lowerCAmelCase , resume_download=lowerCAmelCase , proxies=lowerCAmelCase , use_auth_token=lowerCAmelCase , revision=lowerCAmelCase , local_files_only=lowerCAmelCase , ) if resolved_config_file is None: logger.info( 'Could not locate the feature extractor configuration file, will try to use the model config instead.' ) return {} with open(lowerCAmelCase , encoding='utf-8' ) as reader: return json.load(lowerCAmelCase ) class _lowerCamelCase : '''simple docstring''' def __init__( self : str ) -> Optional[int]: raise EnvironmentError( 'AutoFeatureExtractor is designed to be instantiated ' 'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(_A ) def __lowerCAmelCase ( cls : List[str] , _A : List[Any] , **_A : Tuple ) -> Optional[Any]: __magic_name__ : Tuple = kwargs.pop('config' , _A ) __magic_name__ : Dict = kwargs.pop('trust_remote_code' , _A ) __magic_name__ : str = True __magic_name__ , __magic_name__ : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_A , **_A ) __magic_name__ : List[str] = config_dict.get('feature_extractor_type' , _A ) __magic_name__ : Dict = None if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): __magic_name__ : str = config_dict['auto_map']['AutoFeatureExtractor'] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_A , _A ): __magic_name__ : int = AutoConfig.from_pretrained(_A , **_A ) # It could be in `config.feature_extractor_type`` __magic_name__ : Tuple = getattr(_A , 'feature_extractor_type' , _A ) if hasattr(_A , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map: __magic_name__ : List[str] = config.auto_map['AutoFeatureExtractor'] if feature_extractor_class is not None: __magic_name__ : Optional[int] = feature_extractor_class_from_name(_A ) __magic_name__ : Optional[int] = feature_extractor_auto_map is not None __magic_name__ : List[Any] = feature_extractor_class is not None or type(_A ) in FEATURE_EXTRACTOR_MAPPING __magic_name__ : List[str] = resolve_trust_remote_code( _A , _A , _A , _A ) if has_remote_code and trust_remote_code: __magic_name__ : Tuple = get_class_from_dynamic_module( _A , _A , **_A ) __magic_name__ : Dict = kwargs.pop('code_revision' , _A ) if os.path.isdir(_A ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_A , **_A ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_A , **_A ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_A ) in FEATURE_EXTRACTOR_MAPPING: __magic_name__ : int = FEATURE_EXTRACTOR_MAPPING[type(_A )] return feature_extractor_class.from_dict(_A , **_A ) raise ValueError( F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ' F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' ) @staticmethod def __lowerCAmelCase ( _A : str , _A : int ) -> str: FEATURE_EXTRACTOR_MAPPING.register(_A , _A )
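# A minimal usage sketch (not part of the module): resolving an extractor by
# checkpoint name. The checkpoint id is illustrative but real, and resolves
# through the model-type mapping defined above.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, per the wav2vec2 entry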
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt' __magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , 'a+' ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __magic_name__ : Optional[Any] = metrics[key] if isinstance(_A , torch.Tensor ): __magic_name__ : Tuple = val.item() __magic_name__ : int = F'{key}: {val:.6f}\n' writer.write(_A ) if not save_generations: return if "preds" in metrics: __magic_name__ : str = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_A ) @rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple: try: __magic_name__ : str = pl_module.model.model.num_parameters() except AttributeError: __magic_name__ : List[str] = pl_module.model.num_parameters() __magic_name__ : List[Any] = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , 'test' ) @rank_zero_only def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
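# A hedged wiring sketch: in the upstream example the mangled helpers above are
# `get_checkpoint_callback(output_dir, metric)`,
# `get_early_stopping_callback(metric, patience)` and `Seq2SeqLoggingCallback`;
# those names are assumed here for readability, so the lines stay commented.
# import pytorch_lightning as pl
# checkpoint_cb = get_checkpoint_callback(output_dir, "rouge2")
# early_stop_cb = get_early_stopping_callback("rouge2", 3)
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])
# trainer.fit(module)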
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : int = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' f'{test_file} instead.' ) __magic_name__ : Dict = components[-1] if not test_fn.endswith('py' ): raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' ) if not test_fn.startswith('test_modeling_' ): raise ValueError( f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' ) __magic_name__ : Dict = components[:-1] + [test_fn.replace('.py' , '' )] __magic_name__ : List[Any] = '.'.join(lowerCAmelCase ) return test_module_path def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" __magic_name__ : int = get_module_path(lowerCAmelCase ) __magic_name__ : int = importlib.import_module(lowerCAmelCase ) return test_module def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : Tuple = [] __magic_name__ : List[str] = get_test_module(lowerCAmelCase ) for attr in dir(lowerCAmelCase ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(lowerCAmelCase , lowerCAmelCase ) ) # sort with class names return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x.__name__ ) def lowerCamelCase ( lowerCAmelCase : List[str] ): """simple docstring""" __magic_name__ : Optional[Any] = [] __magic_name__ : List[Any] = get_test_module(lowerCAmelCase ) for attr in dir(lowerCAmelCase ): __magic_name__ : int = getattr(lowerCAmelCase , lowerCAmelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __magic_name__ : List[Any] = getattr(lowerCAmelCase , 'all_model_classes' , [] ) if len(lowerCAmelCase ) > 0: test_classes.append(lowerCAmelCase ) # sort with class names return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x.__name__ ) def lowerCamelCase ( lowerCAmelCase : str ): """simple docstring""" __magic_name__ : Union[str, Any] = get_test_classes(lowerCAmelCase ) __magic_name__ : Union[str, Any] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x.__name__ ) def lowerCamelCase ( lowerCAmelCase : List[Any] ): """simple docstring""" __magic_name__ : Optional[Any] = test_class() if hasattr(lowerCAmelCase , 'setUp' ): test.setUp() __magic_name__ : str = None if hasattr(lowerCAmelCase , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __magic_name__ : str = test.model_tester.__class__ return model_tester def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : str = get_test_classes(lowerCAmelCase ) __magic_name__ : Tuple = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCAmelCase ) # sort with class names return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x.__name__ ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" __magic_name__ : List[Any] = get_test_classes_for_model(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : List[str] = [] for test_class in test_classes: __magic_name__ : int = get_model_tester_from_test_class(lowerCAmelCase ) if tester_class is not None: tester_classes.append(lowerCAmelCase ) # sort with class names return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x.__name__ ) def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : Optional[int] = get_test_classes(lowerCAmelCase ) __magic_name__ : Optional[Any] = {test_class: get_model_tester_from_test_class(lowerCAmelCase ) for test_class in test_classes} return test_tester_mapping def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" __magic_name__ : List[Any] = get_model_classes(lowerCAmelCase ) __magic_name__ : Tuple = { model_class: get_test_classes_for_model(lowerCAmelCase , lowerCAmelCase ) for model_class in model_classes } return model_test_mapping def lowerCamelCase ( lowerCAmelCase : List[Any] ): """simple docstring""" __magic_name__ : List[Any] = get_model_classes(lowerCAmelCase ) __magic_name__ : Optional[int] = { model_class: get_tester_classes_for_model(lowerCAmelCase , lowerCAmelCase ) for model_class in model_classes } return model_to_tester_mapping def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" if isinstance(lowerCAmelCase , lowerCAmelCase ): return o elif isinstance(lowerCAmelCase , lowerCAmelCase ): return o.__name__ elif isinstance(lowerCAmelCase , (list, tuple) ): return [to_json(lowerCAmelCase ) for x in o] elif isinstance(lowerCAmelCase , lowerCAmelCase ): return {to_json(lowerCAmelCase ): to_json(lowerCAmelCase ) for k, v in o.items()} else: return o
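# A hedged usage sketch (hypothetical test path): in the upstream utility the
# mangled module-level helpers above are `get_model_to_tester_mapping` and
# `to_json`; those names are assumed here, so the lines stay commented.
# mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}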
'''simple docstring'''
# Function names restored from their call sites so the recursion resolves; the
# obfuscated dump gave every function the same name, shadowing all but the last.
def one_pence() -> int:
    """simple docstring"""
    return 1


def two_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """simple docstring"""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
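# A cross-check sketch: the same count via a standard bottom-up coin-partition
# DP. For the 200p target both approaches should agree; the expected value
# 73682 is the well-known Project Euler 31 answer, stated from memory rather
# than from the source.
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[v] = number of ways to form v pence
    for coin in coins:
        for value in range(coin, pence + 1):
            ways[value] += ways[value - coin]
    return ways[pence]


assert solution_dp(200) == solution(200) == 73682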
'''simple docstring'''
class _lowerCamelCase:  # Public class to implement a graph
    '''simple docstring'''

    # Attribute and method names restored from their call sites so the class
    # actually runs; `count_islands` is the assumed upstream name (it is never
    # called internally).
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
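# A quick usage sketch (assumes the restored method names above; the mangled
# class name is kept as it appears in this dump). With 8-directional
# connectivity this grid contains 5 islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    counter = _lowerCamelCase(len(grid), len(grid[0]), grid)
    print(counter.count_islands())  # -> 5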
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCAmelCase :List[str] = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : int , *_A : Optional[Any] , **_A : Tuple ) -> Dict: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type(_A ) def __call__( self : Tuple , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : Optional[int] ) -> Optional[int]: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : str , **_A : Union[str, Any] ) -> int: return {}, {}, {} def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> List[str]: __magic_name__ : List[str] = load_image(_A ) __magic_name__ : Tuple = image.size __magic_name__ : Union[str, Any] = self.image_processor(images=_A , return_tensors=self.framework ) return model_inputs def __lowerCAmelCase ( self : Optional[Any] , _A : List[str] ) -> Tuple: __magic_name__ : Tuple = self.model(**_A ) return model_outputs def __lowerCAmelCase ( self : int , _A : Any ) -> List[str]: __magic_name__ : List[str] = model_outputs.predicted_depth __magic_name__ : Dict = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=_A ) __magic_name__ : Optional[int] = prediction.squeeze().cpu().numpy() __magic_name__ : str = (output * 255 / np.max(_A )).astype('uint8' ) __magic_name__ : List[Any] = Image.fromarray(_A ) __magic_name__ : List[Any] = {} __magic_name__ : Union[str, Any] = predicted_depth __magic_name__ : List[Any] = depth return output_dict
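# A hedged usage sketch: running this pipeline through the high-level factory.
# The checkpoint id is illustrative (Intel/dpt-large is a common depth model),
# and the output keys match the dict built in the postprocess step above.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")        # PIL image, as produced in postprocess
print(result["predicted_depth"].shape)   # raw depth tensor before rescaling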
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
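# A hedged usage sketch for the pipeline above; the checkpoint id is
# illustrative but real.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# -> [{'generated_text': '...caption...'}], matching the records built in postprocess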
'''simple docstring''' import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : str , _A : int , _A : Any=14 , _A : Any=7 , _A : Any=True , _A : Union[str, Any]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[Any]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Union[str, Any]=5 , _A : Dict=4 , _A : List[str]=37 , _A : Optional[int]="gelu" , _A : Union[str, Any]=0.1 , _A : int=0.1 , _A : str=512 , _A : List[str]=16 , _A : str=2 , _A : int=0.02 , _A : str=3 , _A : Union[str, Any]=4 , _A : Any=None , ) -> Tuple: __magic_name__ : Tuple = parent __magic_name__ : Any = batch_size __magic_name__ : Any = seq_length __magic_name__ : Optional[int] = is_training __magic_name__ : Optional[Any] = use_token_type_ids __magic_name__ : Dict = use_input_mask __magic_name__ : Optional[Any] = use_labels __magic_name__ : Optional[Any] = use_mc_token_ids __magic_name__ : List[str] = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Union[str, Any] = num_hidden_layers __magic_name__ : Optional[Any] = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : int = hidden_act __magic_name__ : Tuple = hidden_dropout_prob __magic_name__ : Tuple = attention_probs_dropout_prob __magic_name__ : Optional[int] = max_position_embeddings __magic_name__ : Tuple = type_vocab_size __magic_name__ : Optional[Any] = type_sequence_label_size __magic_name__ : str = initializer_range __magic_name__ : Dict = num_labels __magic_name__ : Optional[Any] = num_choices __magic_name__ : int = scope __magic_name__ : Union[str, Any] = self.vocab_size - 1 def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Tuple = None if self.use_input_mask: __magic_name__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : Optional[int] = None if self.use_token_type_ids: __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Any = None if self.use_mc_token_ids: __magic_name__ : Dict = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __magic_name__ : Union[str, Any] = None __magic_name__ : str = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : List[str] = self.get_config() __magic_name__ : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __lowerCAmelCase ( self : Dict ) -> Dict: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , 
n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __lowerCAmelCase ( self : Optional[Any] , _A : Dict , _A : Dict , _A : Optional[int] , _A : Optional[int] , _A : List[str] , *_A : Optional[Any] ) -> int: __magic_name__ : List[Any] = CTRLModel(config=_A ) model.to(_A ) model.eval() model(_A , token_type_ids=_A , head_mask=_A ) model(_A , token_type_ids=_A ) __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Dict , _A : List[Any] , _A : Union[str, Any] , _A : Dict , *_A : Any ) -> List[str]: __magic_name__ : Optional[Any] = CTRLLMHeadModel(_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Tuple = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : Union[str, Any] = config_and_inputs __magic_name__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask} return config, inputs_dict def __lowerCAmelCase ( self : str , _A : Any , _A : Optional[int] , _A : List[Any] , _A : Optional[Any] , *_A : List[str] ) -> Dict: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Tuple = CTRLForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Union[str, Any] = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ : str = (CTRLLMHeadModel,) if is_torch_available() else () A_ : Optional[Any] = ( { """feature-extraction""": CTRLModel, """text-classification""": CTRLForSequenceClassification, """text-generation""": CTRLLMHeadModel, """zero-shot""": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ : Any = True A_ : int = False A_ : List[Any] = False def __lowerCAmelCase ( self : Dict , _A : Any , _A : Any , _A : str , _A : Optional[Any] , _A : Union[str, Any] ) -> Tuple: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __lowerCAmelCase ( self : str ) -> str: __magic_name__ : List[Any] = CTRLModelTester(self ) __magic_name__ : Optional[int] = ConfigTester(self , config_class=_A , n_embd=37 ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Union[str, Any] ) -> int: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Dict ) -> Any: __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple: pass @slow def __lowerCAmelCase ( self : int ) -> Optional[Any]: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = CTRLModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __lowerCAmelCase ( self : List[Any] ) -> int: pass @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: __magic_name__ : Union[str, Any] = CTRLLMHeadModel.from_pretrained('ctrl' ) model.to(_A ) __magic_name__ : Any = torch.tensor( [[11859, 0, 1611, 8]] , dtype=torch.long , device=_A ) # Legal the president is __magic_name__ : List[Any] = [ 11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __magic_name__ : Any = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].tolist() , _A )
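# A hedged sketch of the greedy generation exercised by the integration test
# above (the `ctrl` checkpoint is the one the test loads; note it is large).
import torch
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))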
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fp16: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
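# A minimal, self-contained sketch of the weight-decay parameter grouping used in
# the optimizer configuration above: biases and LayerNorm weights are excluded
# from decay. The tiny model and hyperparameter values here are illustrative
# placeholders, not taken from this script.
import torch
from torch.optim import AdamW

model = torch.nn.Linear(4, 2)  # stand-in for the wrapped transformer
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)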
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :List[Any] = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[str] = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :List[Any] = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowerCAmelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
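# A minimal sketch of the lazy-import pattern behind _LazyModule above: submodule
# contents are resolved on first attribute access rather than at package import
# time. Names here are illustrative, not the transformers implementation itself.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [class, ...]} to {class: submodule} for lookups.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)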
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
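# A short usage sketch of the scheduler exercised by the tests above: a DDPM
# reverse-diffusion loop. torch.randn_like stands in for a real UNet's noise
# prediction, so the output is meaningless noise; only the control flow matters.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample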
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase :Optional[int] = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : str=None , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , ): """simple docstring""" if attention_mask is None: __magic_name__ : str = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __magic_name__ : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __magic_name__ : List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __magic_name__ : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __magic_name__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _lowerCamelCase : '''simple docstring''' def __init__( self : str , _A : List[Any] , _A : Any=13 , _A : Dict=7 , _A : Tuple=True , _A : Dict=False , _A : int=99 , _A : str=16 , _A : Union[str, Any]=2 , _A : Tuple=4 , _A : int=4 , _A : Optional[int]="gelu" , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : Tuple=32 , _A : List[Any]=2 , _A : Optional[Any]=1 , _A : Any=0 , _A : Dict=0.02 , ) -> Optional[Any]: __magic_name__ : int = parent __magic_name__ : List[Any] = batch_size __magic_name__ : List[Any] = seq_length __magic_name__ : int = is_training __magic_name__ : Union[str, Any] = use_labels __magic_name__ : Dict = vocab_size __magic_name__ : Dict = hidden_size __magic_name__ : Any = num_hidden_layers __magic_name__ : Any = num_attention_heads __magic_name__ : Dict = intermediate_size __magic_name__ : Optional[Any] = hidden_act __magic_name__ : str = hidden_dropout_prob __magic_name__ : Optional[Any] = attention_probs_dropout_prob __magic_name__ : int = max_position_embeddings __magic_name__ : str = eos_token_id __magic_name__ : Any = pad_token_id __magic_name__ : Any = bos_token_id __magic_name__ : Any = initializer_range def __lowerCAmelCase ( self : Tuple ) -> List[Any]: __magic_name__ : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __magic_name__ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __magic_name__ : Optional[Any] = shift_tokens_right(_A , 1 , 2 ) __magic_name__ : Dict = BlenderbotConfig( vocab_size=self.vocab_size , 
d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , ) __magic_name__ : Optional[int] = prepare_blenderbot_inputs_dict(_A , _A , _A ) return config, inputs_dict def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: __magic_name__ , __magic_name__ : Any = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self : List[str] , _A : Any , _A : Dict , _A : Tuple ) -> Dict: __magic_name__ : str = 20 __magic_name__ : List[Any] = model_class_name(_A ) __magic_name__ : Union[str, Any] = model.encode(inputs_dict['input_ids'] ) __magic_name__ , __magic_name__ : Any = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) __magic_name__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) __magic_name__ : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) __magic_name__ : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __magic_name__ : List[Any] = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) __magic_name__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) __magic_name__ : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , ) __magic_name__ : str = model.decode(_A , _A ) __magic_name__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def __lowerCAmelCase ( self : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Optional[Any] ) -> List[str]: __magic_name__ : Union[str, Any] = 20 __magic_name__ : Optional[Any] = model_class_name(_A ) __magic_name__ : Optional[Any] = model.encode(inputs_dict['input_ids'] ) __magic_name__ , __magic_name__ : str = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) __magic_name__ : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __magic_name__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A ) __magic_name__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __magic_name__ : Any = model.decode( decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , ) __magic_name__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) __magic_name__ : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , ) __magic_name__ : Dict = 
model.decode(_A , _A , decoder_attention_mask=_A ) __magic_name__ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : List[Any] = 99 def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Tuple = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __magic_name__ : Optional[Any] = input_ids.shape[0] __magic_name__ : Optional[int] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __lowerCAmelCase ( self : str ) -> Dict: __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = self._get_config_and_data() __magic_name__ : Tuple = FlaxBlenderbotForConditionalGeneration(_A ) __magic_name__ : Union[str, Any] = lm_model(input_ids=_A ) __magic_name__ : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _A ) def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Dict = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __magic_name__ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_A ) __magic_name__ : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __magic_name__ : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __magic_name__ : List[Any] = lm_model(input_ids=_A , decoder_input_ids=_A ) __magic_name__ : Union[str, Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _A ) def __lowerCAmelCase ( self : str ) -> str: __magic_name__ : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __magic_name__ : int = shift_tokens_right(_A , 1 , 2 ) __magic_name__ : List[str] = np.equal(_A , 1 ).astype(np.floataa ).sum() __magic_name__ : List[str] = np.equal(_A , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _lowerCamelCase ( lowercase__ , unittest.TestCase , lowercase__ ): '''simple docstring''' A_ : int = True A_ : Union[str, Any] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) A_ : List[Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Any = FlaxBlenderbotModelTester(self ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ , __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward(_A , _A , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __magic_name__ : Union[str, Any] = self._prepare_for_class(_A , _A ) __magic_name__ : Any = model_class(_A ) @jax.jit def encode_jitted(_A : Tuple , _A : Tuple=None , **_A : List[Any] ): return model.encode(input_ids=_A , attention_mask=_A ) with self.subTest('JIT Enabled' ): __magic_name__ : List[Any] = encode_jitted(**_A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __magic_name__ : List[str] = encode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: __magic_name__ , __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __magic_name__ : List[str] = model_class(_A ) __magic_name__ : List[Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) __magic_name__ : Tuple = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_A : List[str] , _A : Any , _A : int ): return model.decode( decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , ) with self.subTest('JIT Enabled' ): __magic_name__ : Tuple = decode_jitted(**_A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __magic_name__ : Any = decode_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCAmelCase ( self : Any ) -> int: for model_class_name in self.all_model_classes: __magic_name__ : List[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __magic_name__ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id __magic_name__ : List[str] = model(_A ) self.assertIsNotNone(_A ) @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' ) @slow def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25} __magic_name__ : List[str] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True} __magic_name__ : int = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_A ) __magic_name__ : List[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' ) __magic_name__ : str = ['Sam'] __magic_name__ : List[Any] = tokenizer(_A , return_tensors='jax' ) __magic_name__ : Optional[Any] = model.generate(**_A , **_A ) __magic_name__ : Tuple = 'Sam is a great name. It means "sun" in Gaelic.' 
__magic_name__ : List[str] = tokenizer.batch_decode(_A , **_A ) assert generated_txt[0].strip() == tgt_text
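# A minimal numpy sketch of the shift_tokens_right behavior checked above: the
# decoder input is the target sequence shifted one position to the right, with
# the start token prepended. Padding handling is simplified relative to the
# library version, and the function name is ours.
import numpy as np

def shift_right(input_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted

print(shift_right(np.array([[5, 6, 2]]), decoder_start_token_id=0))  # [[0 5 6]]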
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
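# A small helper mirroring the seeding logic in the dummy-input setup above:
# older torch builds do not support torch.Generator on the "mps" backend, so the
# test falls back to seeding the global CPU generator there. The helper name is
# ours, not part of the test suite.
import torch

def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)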
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase :Tuple = logging.get_logger(__name__) lowerCAmelCase :List[str] = { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''', } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Any = """mvp""" A_ : Any = ["""past_key_values"""] A_ : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : int , _A : Optional[int]=50267 , _A : Optional[Any]=1024 , _A : Any=12 , _A : Tuple=4096 , _A : Optional[int]=16 , _A : Any=12 , _A : Union[str, Any]=4096 , _A : Union[str, Any]=16 , _A : Dict=0.0 , _A : Any=0.0 , _A : int="gelu" , _A : Union[str, Any]=1024 , _A : str=0.1 , _A : List[str]=0.0 , _A : Dict=0.0 , _A : Union[str, Any]=0.02 , _A : Optional[int]=0.0 , _A : Dict=False , _A : Optional[Any]=True , _A : Dict=1 , _A : Any=0 , _A : Tuple=2 , _A : List[str]=True , _A : Optional[int]=2 , _A : int=2 , _A : Dict=False , _A : List[Any]=100 , _A : Any=800 , **_A : Any , ) -> Tuple: __magic_name__ : int = vocab_size __magic_name__ : Optional[Any] = max_position_embeddings __magic_name__ : str = d_model __magic_name__ : Any = encoder_ffn_dim __magic_name__ : Dict = encoder_layers __magic_name__ : Optional[int] = encoder_attention_heads __magic_name__ : Any = decoder_ffn_dim __magic_name__ : Any = decoder_layers __magic_name__ : Any = decoder_attention_heads __magic_name__ : int = dropout __magic_name__ : str = attention_dropout __magic_name__ : int = activation_dropout __magic_name__ : Union[str, Any] = activation_function __magic_name__ : Any = init_std __magic_name__ : Dict = encoder_layerdrop __magic_name__ : Any = decoder_layerdrop __magic_name__ : List[Any] = classifier_dropout __magic_name__ : str = use_cache __magic_name__ : Union[str, Any] = encoder_layers __magic_name__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True __magic_name__ : List[Any] = use_prompt __magic_name__ : List[str] = prompt_length __magic_name__ : Optional[int] = prompt_mid_dim super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _A ): __magic_name__ : str = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
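# A short usage sketch for the configuration class above, assuming the public
# transformers name MvpConfig for it. attribute_map makes hidden_size an alias
# of d_model; the override values below are illustrative.
from transformers import MvpConfig

config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
assert config.hidden_size == config.d_model  # resolved through attribute_map
restored = MvpConfig.from_dict(config.to_dict())  # round-trip through a dict
assert restored.encoder_layers == 6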
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
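# A minimal inference sketch matching the integration test above; downloading
# the checkpoint from the Hub is assumed, and the input sentence is our own.
import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")
inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)  # (1, sequence_length, 768)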
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple , _A : Optional[int] , _A : int , _A : Dict ) -> int: self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A ) def __lowerCAmelCase ( self : int ) -> Union[str, Any]: __magic_name__ : Optional[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_A ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def __lowerCAmelCase ( self : Optional[Any] ) -> int: __magic_name__ : int = None ops.enable_eager_execution_internal() __magic_name__ : Union[str, Any] = tf.config.list_physical_devices('CPU' ) if len(_A ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __magic_name__ : Union[str, Any] = tf.config.list_logical_devices(device_type='CPU' ) __magic_name__ : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __magic_name__ : Dict = GradientAccumulator() __magic_name__ : int = tf.Variable([4.0, 3.0] ) __magic_name__ , __magic_name__ : str = create_optimizer(5E-5 , 10 , 5 ) __magic_name__ : Tuple = tf.Variable([0.0, 0.0] , trainable=_A ) def accumulate_on_replica(_A : Optional[int] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_A : Dict , _A : int ): with strategy.scope(): __magic_name__ : Optional[Any] = strategy.experimental_local_results(_A ) local_variables[0].assign(_A ) local_variables[1].assign(_A ) strategy.run(_A , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_A ) def _check_local_values(_A : Union[str, Any] , _A : List[Any] ): __magic_name__ : List[str] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _A , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , _A , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
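# A condensed usage sketch of the accumulator tested above: collect gradients
# over several micro-batches, apply them once, then reset. The gradient values
# are toy data.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

variable = tf.Variable([1.0, 2.0])
optimizer, _ = create_optimizer(5e-5, 10, 2)  # (init_lr, train_steps, warmup_steps)
accumulator = GradientAccumulator()
for grad in ([0.1, 0.2], [0.3, -0.1]):
    accumulator([tf.constant(grad)])
optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
accumulator.reset()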
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: __magic_name__ : Tuple = row __magic_name__ : str = col __magic_name__ : Optional[Any] = graph def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: # Checking all 8 elements surrounding nth element __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : Optional[int] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A ) def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : Any = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_A , _A , _A ) count += 1 return count
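# A self-contained restatement of the algorithm above with plain names: count
# 8-connected components of 1-cells via depth-first search. The grid below is a
# toy example.
def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        visited[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not visited[ni][nj]:
                    dfs(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0, 0], [0, 1, 0, 1]]))  # 2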
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase :str = logging.get_logger(__name__) lowerCAmelCase :List[str] = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = """gpt_neo""" A_ : Optional[Any] = ["""past_key_values"""] A_ : str = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : int , _A : int=50257 , _A : List[str]=2048 , _A : Dict=2048 , _A : int=24 , _A : str=[[["global", "local"], 12]] , _A : Tuple=16 , _A : Dict=None , _A : List[Any]=256 , _A : str="gelu_new" , _A : List[str]=0.0 , _A : str=0.0 , _A : Optional[int]=0.0 , _A : Optional[int]=0.1 , _A : int=1E-5 , _A : Optional[int]=0.02 , _A : int=True , _A : Optional[int]=50256 , _A : Any=50256 , **_A : Union[str, Any] , ) -> Any: __magic_name__ : List[str] = vocab_size __magic_name__ : List[str] = max_position_embeddings __magic_name__ : List[Any] = hidden_size __magic_name__ : Any = num_layers __magic_name__ : Tuple = num_heads __magic_name__ : Union[str, Any] = intermediate_size __magic_name__ : Dict = window_size __magic_name__ : Union[str, Any] = activation_function __magic_name__ : Optional[Any] = resid_dropout __magic_name__ : Tuple = embed_dropout __magic_name__ : List[str] = attention_dropout __magic_name__ : Optional[int] = classifier_dropout __magic_name__ : Dict = layer_norm_epsilon __magic_name__ : int = initializer_range __magic_name__ : Optional[Any] = use_cache __magic_name__ : Dict = bos_token_id __magic_name__ : Tuple = eos_token_id __magic_name__ : str = attention_types __magic_name__ : Optional[int] = self.expand_attention_types_params(_A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) @staticmethod def __lowerCAmelCase ( _A : Union[str, Any] ) -> List[Any]: __magic_name__ : Optional[int] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any] ): """simple docstring""" import torch __magic_name__ : Any = input.size() __magic_name__ : Optional[int] = len(lowerCAmelCase ) __magic_name__ : Tuple = shape[dimension] __magic_name__ : Union[str, Any] = torch.arange(0 , lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Dict = torch.div(sizedim - size , lowerCAmelCase , rounding_mode='floor' ) + 1 __magic_name__ : int = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None] __magic_name__ : str = [slice(lowerCAmelCase )] * rank __magic_name__ : Tuple = indices __magic_name__ : str = input[s] __magic_name__ : Optional[int] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Dict ): """simple docstring""" import torch __magic_name__ : Optional[int] = torch.arange(1 , lowerCAmelCase ) __magic_name__ : List[str] = torch.remainder(lowerCAmelCase , lowerCAmelCase ) __magic_name__ : Optional[Any] = remainders == 0 __magic_name__ : List[Any] = candidates[divisor_indices] __magic_name__ : Dict = torch.max(lowerCAmelCase ) return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode='floor' ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' @property def __lowerCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: __magic_name__ : Union[str, Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(_A , direction='inputs' ) __magic_name__ : Any = {0: 'batch', 1: 'past_sequence + sequence'} else: __magic_name__ : Optional[int] = {0: 'batch', 1: 'sequence'} return common_inputs @property def __lowerCAmelCase ( self : Dict ) -> int: return self._config.num_heads def __lowerCAmelCase ( self : Dict , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]: __magic_name__ : Any = super(_A , self ).generate_dummy_inputs( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) # We need to order the input in the way they appears in the forward() __magic_name__ : List[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch __magic_name__ , __magic_name__ : Tuple = common_inputs['input_ids'].shape # Not using the same length for past_key_values __magic_name__ : List[str] = seqlen + 2 __magic_name__ : Any = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __magic_name__ : str = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers ) ] __magic_name__ : List[Any] = common_inputs['attention_mask'] if self.use_past: __magic_name__ : List[str] = ordered_inputs['attention_mask'].dtype __magic_name__ : Dict = torch.cat( [ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self : Optional[int] ) -> int: return 13
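# The custom slicing helper defined above appears to mirror torch.Tensor.unfold
# along a single dimension; a quick shape check on a toy tensor (our example,
# not from the source):
import torch

x = torch.arange(12).reshape(1, 12)
windows = x.unfold(dimension=1, size=4, step=2)
print(windows.shape)  # torch.Size([1, 5, 4]): floor((12 - 4) / 2) + 1 = 5 windows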
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
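# A minimal sketch of the optional-dependency guard pattern used above, written
# with stdlib importlib rather than the transformers helpers; all names here are
# illustrative.
import importlib.util

def is_package_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

_import_structure = {"processing": ["Processor"]}
if is_package_available("sentencepiece"):
    _import_structure["tokenization"] = ["Tokenizer"]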